commit 683a4b84a4fa522a7a1f6c8aea358c0b02872010
Author: Ubuntu
Date:   Thu Jan 23 01:35:27 2025 +0800

    first add

diff --git a/backUpEs.sh b/backUpEs.sh
new file mode 100755
index 0000000..169cdd5
--- /dev/null
+++ b/backUpEs.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Source directory and backup file paths
+SOURCE_DIR="/var/snap/microk8s/common/mnt/data/elasticsearch-data"
+BACKUP_DIR="/home/ubuntu/data/tmp/backUp"
+BACKUP_FILE="$BACKUP_DIR/backup-$(date '+%Y-%m-%d_%H-%M-%S').tar.gz"
+
+# Create the backup directory if it does not exist
+mkdir -p "$BACKUP_DIR"
+
+# Create the backup (preserving the elasticsearch-data directory structure)
+echo "Backing up $SOURCE_DIR to $BACKUP_FILE"
+tar -czf "$BACKUP_FILE" -C "$(dirname "$SOURCE_DIR")" "$(basename "$SOURCE_DIR")"
+
+# Check whether the backup succeeded
+if [ $? -eq 0 ]; then
+    echo "Backup succeeded: $BACKUP_FILE"
+else
+    echo "Backup failed"
+    exit 1
+fi
+
+# Keep only the 8 newest backups (the original file is truncated mid-comment
+# here; the line below is an assumed completion of that retention step)
+ls -1t "$BACKUP_DIR"/backup-*.tar.gz 2>/dev/null | tail -n +9 | xargs -r rm -f
+
diff --git a/busybox_default.yaml b/busybox_default.yaml
new file mode 100644
index 0000000..6b8faff
--- /dev/null
+++ b/busybox_default.yaml
@@ -0,0 +1,22 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ubuntu-deployment
+  namespace: default
+  labels:
+    app: ubuntu
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ubuntu
+  template:
+    metadata:
+      labels:
+        app: ubuntu
+    spec:
+      containers:
+      - name: ubuntu
+        image: ubuntu:latest
+        command: ["sh", "-c", "while true; do echo Hello Kubernetes!; sleep 3600; done"]
+
diff --git a/dockerFiles/vnc/start-vnc.sh b/dockerFiles/vnc/start-vnc.sh
new file mode 100644
index 0000000..74e06b7
--- /dev/null
+++ b/dockerFiles/vnc/start-vnc.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Update the package lists and upgrade the system
+apt-get update && apt-get upgrade -y
+
+# Set the VNC password
+if [ -z "$VNC_PASSWORD" ]; then
+    echo "Error: VNC_PASSWORD is not set"
+    exit 1
+fi
+
+mkdir -p ~/.vnc
+echo "$VNC_PASSWORD" | vncpasswd -f > ~/.vnc/passwd
+chmod 600 ~/.vnc/passwd
+
+# Set environment variables
+export USER=root
+export DISPLAY=:1
+
+# Start the XFCE desktop environment and the VNC server
+unset SESSION_MANAGER
+unset DBUS_SESSION_BUS_ADDRESS
+xrdb $HOME/.Xresources
+startxfce4 &
+vncserver :1 -geometry 1280x800 -depth 24
+# Start x11vnc for clipboard support
+x11vnc -display :1 -N -forever -shared &
+
+# Make sure the VNC server log directory exists
+mkdir -p /root/.vnc
+
+# Keep the container running
+tail -F /root/.vnc/*.log
+
diff --git a/dockerFiles/vnc/vnc.dockerfile b/dockerFiles/vnc/vnc.dockerfile
new file mode 100644
index 0000000..713c9cf
--- /dev/null
+++ b/dockerFiles/vnc/vnc.dockerfile
@@ -0,0 +1,32 @@
+# Base image: Ubuntu (built for the ARM64 architecture)
+FROM ubuntu:20.04
+
+# Avoid interactive prompts during package installation
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Update the system and install the required packages
+RUN apt-get update && apt-get install -y \
+    xfce4 \
+    xfce4-goodies \
+    tightvncserver \
+    dbus-x11 \
+    x11-xserver-utils \
+    xvfb \
+    xterm \
+    wget \
+    firefox \
+    chromium-browser \
+    vim \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install the VNC startup script
+COPY start-vnc.sh /usr/local/bin/start-vnc.sh
+RUN chmod +x /usr/local/bin/start-vnc.sh
+
+# Command to run when the container starts
+CMD ["/usr/local/bin/start-vnc.sh"]
+
+# Expose the VNC port
+EXPOSE 5901
+
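For reference, a minimal sketch of building and running this image; the `vnc-desktop` tag and `changeme` password are placeholders, not part of the repo:

```
# Build from the repo root, using the Dockerfile added above
docker build -f dockerFiles/vnc/vnc.dockerfile -t vnc-desktop dockerFiles/vnc

# Run it; start-vnc.sh exits early unless VNC_PASSWORD is set,
# and the display :1 server listens on the exposed port 5901
docker run -d --name vnc-desktop -p 5901:5901 -e VNC_PASSWORD=changeme vnc-desktop
```
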
diff --git a/efk/README.md b/efk/README.md
new file mode 100644
index 0000000..0cabc7b
--- /dev/null
+++ b/efk/README.md
@@ -0,0 +1,65 @@
+## How to recreate the whole namespace
+
+First delete the efk namespace, which removes every resource inside it except the PVs.
+Then: microk8s.kubectl create namespace efk
+
+
+option 1:
+1. Make sure the contents of the config directory in this repo have been copied to /var/snap/microk8s/common/mnt/data/elasticsearch-config
+   - sudo mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-data -p || true
+   - sudo mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-config -p || true
+2. Deploy the resources in the efk namespace: every yaml in this directory that has not been disabled
+   - chmod 777 /var/snap/microk8s/common/mnt/data/elasticsearch-config && chmod 777 /var/snap/microk8s/common/mnt/data/elasticsearch-data
+   - cp config/* /var/snap/microk8s/common/mnt/data/elasticsearch-config -r
+   - Install the Elasticsearch resources first
+   - Run ./createSecure_passwd.sh
+     At this point it is worth pausing to check that the Elasticsearch pod is healthy, otherwise the remaining steps are pointless: microk8s.kubectl get all -n efk
+   - Then install the Fluentd resources
+   - Run ./createFluentdAccoutnIn.sh
+   - Finally install the Kibana resources
+
+--------
+
+option 2:
+
+1. After all the yaml files have been applied, run:
+
+```
+./createSecure_passwd.sh
+```
+
+This creates:
+account: elastic
+password: your_secure_password
+
+2. Then create the service account for Kibana and redeploy:
+
+```
+./refreshTokenForKibana.sh
+```
+3. Make sure the latest Traefik load-balancer IP is configured in the upstream block of /etc/nginx/nginx.conf
+--------------
+Pick one of the two options above.
+Either way, finish by browsing: http://kibana.k8s.xunlang.home
+
+In Kibana's dev tools, run:
+
+```
+PUT _index_template/logstash_template
+{
+  "index_patterns": ["logstash-*"],
+  "template": {
+    "settings": {
+      "number_of_replicas": 0
+    }
+  }
+}
+```
+
+If Fluentd has already started shipping logs, the existing template and indices must be deleted:
+
+```
+DELETE _template/logstash_template
+DELETE /logstash-2024.11.09
+
+```
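A quick way to confirm the Elasticsearch described above is actually up before moving on (a sketch only; it reuses the elastic / your_secure_password account created by createSecure_passwd.sh and resolves the service IP the same way backUpElasticSearch.sh below does):

```
ES_IP=$(microk8s.kubectl get service/elasticsearch -n efk -o json | jq -r '.spec.clusterIP')
curl -u elastic:your_secure_password "http://${ES_IP}:9200/_cluster/health?pretty"
```
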
diff --git a/efk/_git/COMMIT_EDITMSG b/efk/_git/COMMIT_EDITMSG
new file mode 100644
index 0000000..3b387a9
--- /dev/null
+++ b/efk/_git/COMMIT_EDITMSG
@@ -0,0 +1 @@
+update README
diff --git a/efk/_git/HEAD b/efk/_git/HEAD
new file mode 100644
index 0000000..cb089cd
--- /dev/null
+++ b/efk/_git/HEAD
@@ -0,0 +1 @@
+ref: refs/heads/master
diff --git a/efk/_git/ORIG_HEAD b/efk/_git/ORIG_HEAD
new file mode 100644
index 0000000..550c42d
--- /dev/null
+++ b/efk/_git/ORIG_HEAD
@@ -0,0 +1 @@
+a829769ed1d706a7def1e750b9621cc14bf669ff
diff --git a/efk/_git/config b/efk/_git/config
new file mode 100644
index 0000000..b225b5c
--- /dev/null
+++ b/efk/_git/config
@@ -0,0 +1,11 @@
+[core]
+	repositoryformatversion = 0
+	filemode = true
+	bare = false
+	logallrefupdates = true
+[remote "origin"]
+	url = baidu:repos/k8s/efk
+	fetch = +refs/heads/*:refs/remotes/origin/*
+[branch "master"]
+	remote = origin
+	merge = refs/heads/master
diff --git a/efk/_git/description b/efk/_git/description
new file mode 100644
index 0000000..498b267
--- /dev/null
+++ b/efk/_git/description
@@ -0,0 +1 @@
+Unnamed repository; edit this file 'description' to name the repository.

[efk/_git/hooks/: thirteen stock Git sample hooks (applypatch-msg.sample through update.sample), added verbatim from Git's default hook templates; their boilerplate contents are omitted here.]

diff --git a/efk/_git/index b/efk/_git/index
new file mode 100644
index 0000000..25fa622
Binary files /dev/null and b/efk/_git/index differ
diff --git a/efk/_git/info/exclude b/efk/_git/info/exclude
new file mode 100644
index 0000000..a5196d1
--- /dev/null
+++ b/efk/_git/info/exclude
@@ -0,0 +1,6 @@
+# git ls-files --others --exclude-from=.git/info/exclude
+# Lines that start with '#' are comments.
+# For a project mostly in C, the following would be a good set of
+# exclude patterns (uncomment them if you want to use them):
+# *.[oa]
+# *~
diff --git a/efk/_git/logs/HEAD b/efk/_git/logs/HEAD
new file mode 100644
index 0000000..d5eb6af
--- /dev/null
+++ b/efk/_git/logs/HEAD
@@ -0,0 +1,11 @@
+0000000000000000000000000000000000000000 5ab8067b9f81af0270a90c12bcfcc7f071d685dd zhangkun 1727151229 +0800 commit (initial): fluentd almost done
+5ab8067b9f81af0270a90c12bcfcc7f071d685dd 6e2d38a87357e2767c21dc7e2d482ac46eef6d42 zhangkun 1727151284 +0800 commit: remove tar
+6e2d38a87357e2767c21dc7e2d482ac46eef6d42 3fe1c4b81d79fc517c95dc32299bfa67841e58a4 zhangkun 1734359708 +0800 commit: up
+3fe1c4b81d79fc517c95dc32299bfa67841e58a4 0dab4baba98695bc8ffbce60283bb5d7111290c6 zhangkun 1734855389 +0800 commit: fluentd设置为东8区
+0dab4baba98695bc8ffbce60283bb5d7111290c6 d895728b90c5cd39f496bc80f8089c2728de8720 zhangkun 1734918087 +0800 commit: fluentd强制使用东八区
+d895728b90c5cd39f496bc80f8089c2728de8720 32f2a58edd56f35d6d2689b8cab7cc347b12bddf zhangkun 1734925200 +0800 commit: hash散列进行日志分组,不再使用日期分组
+32f2a58edd56f35d6d2689b8cab7cc347b12bddf 72ec36fc335eb7fc761df63f3231aecb4b1048d1 zhangkun 1734944524 +0800 commit: fluentd 改成 1秒钟一刷新,防止堆砌请求不发
+72ec36fc335eb7fc761df63f3231aecb4b1048d1 a829769ed1d706a7def1e750b9621cc14bf669ff phyer 1735051930 +0800 commit: fluentd恢复到以日为单位index
+a829769ed1d706a7def1e750b9621cc14bf669ff a829769ed1d706a7def1e750b9621cc14bf669ff phyer 1735093625 +0800 reset: moving to HEAD
+a829769ed1d706a7def1e750b9621cc14bf669ff 6e401b3546099e5e9a231979ec967b5d2f92c844 phyer 1735093967 +0800 commit: fluentd works fine with es
+6e401b3546099e5e9a231979ec967b5d2f92c844 1a146cad3fb1f6d08e44e811c747b658b62fb85e phyer 1735263819 +0800 commit: update README
diff --git a/efk/_git/logs/refs/heads/master b/efk/_git/logs/refs/heads/master
new file mode 100644
index 0000000..98cac9a
--- /dev/null
+++ b/efk/_git/logs/refs/heads/master
@@ -0,0 +1,10 @@
+0000000000000000000000000000000000000000 5ab8067b9f81af0270a90c12bcfcc7f071d685dd zhangkun 1727151229 +0800 commit (initial): fluentd almost done
+5ab8067b9f81af0270a90c12bcfcc7f071d685dd 6e2d38a87357e2767c21dc7e2d482ac46eef6d42 zhangkun 1727151284 +0800 commit: remove tar
+6e2d38a87357e2767c21dc7e2d482ac46eef6d42 3fe1c4b81d79fc517c95dc32299bfa67841e58a4 zhangkun 1734359708 +0800 commit: up
+3fe1c4b81d79fc517c95dc32299bfa67841e58a4 0dab4baba98695bc8ffbce60283bb5d7111290c6 zhangkun 1734855389 +0800 commit: fluentd设置为东8区
+0dab4baba98695bc8ffbce60283bb5d7111290c6 d895728b90c5cd39f496bc80f8089c2728de8720 zhangkun 1734918087 +0800 commit: fluentd强制使用东八区
+d895728b90c5cd39f496bc80f8089c2728de8720 32f2a58edd56f35d6d2689b8cab7cc347b12bddf zhangkun 1734925200 +0800 commit: hash散列进行日志分组,不再使用日期分组
+32f2a58edd56f35d6d2689b8cab7cc347b12bddf 72ec36fc335eb7fc761df63f3231aecb4b1048d1 zhangkun 1734944524 +0800 commit: fluentd 改成 1秒钟一刷新,防止堆砌请求不发
+72ec36fc335eb7fc761df63f3231aecb4b1048d1 a829769ed1d706a7def1e750b9621cc14bf669ff phyer 1735051930 +0800 commit: fluentd恢复到以日为单位index
+a829769ed1d706a7def1e750b9621cc14bf669ff 6e401b3546099e5e9a231979ec967b5d2f92c844 phyer 1735093967 +0800 commit: fluentd works fine with es
+6e401b3546099e5e9a231979ec967b5d2f92c844 1a146cad3fb1f6d08e44e811c747b658b62fb85e phyer 1735263819 +0800 commit: update README
diff --git a/efk/_git/logs/refs/remotes/origin/master b/efk/_git/logs/refs/remotes/origin/master
new file mode 100644
index 0000000..07d69ef
--- /dev/null
+++ b/efk/_git/logs/refs/remotes/origin/master
@@ -0,0 +1,9 @@
+0000000000000000000000000000000000000000 6e2d38a87357e2767c21dc7e2d482ac46eef6d42 zhangkun 1727151307 +0800 update by push
+6e2d38a87357e2767c21dc7e2d482ac46eef6d42 3fe1c4b81d79fc517c95dc32299bfa67841e58a4 zhangkun 1734359709 +0800 update by push
+3fe1c4b81d79fc517c95dc32299bfa67841e58a4 0dab4baba98695bc8ffbce60283bb5d7111290c6 zhangkun 1734855389 +0800 update by push
+0dab4baba98695bc8ffbce60283bb5d7111290c6 d895728b90c5cd39f496bc80f8089c2728de8720 zhangkun 1734918087 +0800 update by push
+d895728b90c5cd39f496bc80f8089c2728de8720 32f2a58edd56f35d6d2689b8cab7cc347b12bddf zhangkun 1734925202 +0800 update by push
+32f2a58edd56f35d6d2689b8cab7cc347b12bddf 72ec36fc335eb7fc761df63f3231aecb4b1048d1 zhangkun 1734944525 +0800 update by push
+72ec36fc335eb7fc761df63f3231aecb4b1048d1 a829769ed1d706a7def1e750b9621cc14bf669ff phyer 1735051931 +0800 update by push
+a829769ed1d706a7def1e750b9621cc14bf669ff 6e401b3546099e5e9a231979ec967b5d2f92c844 phyer 1735093968 +0800 update by push
+6e401b3546099e5e9a231979ec967b5d2f92c844 1a146cad3fb1f6d08e44e811c747b658b62fb85e phyer 1735263819 +0800 update by push

[efk/_git/objects/: several dozen Git object files added here; their zlib-compressed binary contents are unreadable and omitted. The diff header naming the Fluentd config file that follows was lost in the same garbled region.]
+<source>
+  @type http
+  @id input_http
+  port 8888
+  tag sardine.log
+</source>
+
+<match sardine.log>
+  @type elasticsearch
+  @id output_elasticsearch
+  host elasticsearch
+  port 9200
+  scheme http
+  user fluentd_user
+  password fluentd_password
+  index_name logstash-sardine-%Y.%m.%d
+</match>
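Since the source above is an HTTP input, a test event can be posted straight to it (a sketch; the fluentd host below is an assumption, while port 8888 comes from the config and in_http takes the tag from the URL path):

```
# Send one record; the json= field carries the event body
curl -X POST -d 'json={"message":"hello from curl"}' "http://<fluentd-host>:8888/sardine.log"
```
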
diff --git a/efk/aa.yaml b/efk/aa.yaml
new file mode 100644
index 0000000..72a98f5
--- /dev/null
+++ b/efk/aa.yaml
@@ -0,0 +1,11 @@
+apiVersion: kibana.k8s.elastic.co/v1
+kind: Kibana
+metadata:
+  name: kibana-sample
+spec:
+  version: 8.15.1
+  count: 3
+  elasticsearchRef:
+    name: "elasticsearch-sample"
+  secureSettings:
+  - secretName: kibana-secret-settings
diff --git a/efk/backUpElasticSearch.sh b/efk/backUpElasticSearch.sh
new file mode 100755
index 0000000..bfa019c
--- /dev/null
+++ b/efk/backUpElasticSearch.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+# Timestamp format, e.g. 2024-12-05-23
+CURRENT_DATE=$(date "+%Y-%m-%d-%H")
+
+# Get the CLUSTER-IP of the Elasticsearch service
+ELASTICSEARCH_IP=$(microk8s.kubectl get service/elasticsearch -n efk -o json | jq -r '.spec.clusterIP')
+
+# Debug: show the IP address we resolved
+echo "Elasticsearch IP: ${ELASTICSEARCH_IP}"
+
+# Exit if no IP address could be resolved
+if [ -z "$ELASTICSEARCH_IP" ]; then
+    echo "Could not resolve the Elasticsearch IP address, exiting."
+    exit 1
+fi
+
+# Elasticsearch username and password
+USER="elastic"
+PASSWORD="your_secure_password"
+
+# Try to delete this hour's snapshot, in case it already exists
+DELETE_CURRENT_SNAPSHOT_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${CURRENT_DATE}"
+echo "Trying to delete this hour's snapshot: ${DELETE_CURRENT_SNAPSHOT_URL}"
+
+# curl -f makes the exit status reflect HTTP-level errors, so the checks below mean something
+curl -f -u ${USER}:${PASSWORD} -X DELETE "${DELETE_CURRENT_SNAPSHOT_URL}"
+
+# Check whether the delete succeeded
+if [ $? -eq 0 ]; then
+    echo "Deleted this hour's existing snapshot: ${CURRENT_DATE}"
+else
+    echo "No snapshot exists for this hour; proceeding to create one."
+fi
+
+# Create this hour's snapshot
+BACKUP_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${CURRENT_DATE}"
+echo "Snapshot request URL: ${BACKUP_URL}"
+
+curl -f -u ${USER}:${PASSWORD} -X PUT "${BACKUP_URL}" -H 'Content-Type: application/json' -d '{
+  "indices": "*",
+  "ignore_unavailable": true,
+  "include_global_state": false
+}'
+
+# Check whether the snapshot succeeded
+if [ $? -eq 0 ]; then
+    echo "Snapshot succeeded: ${CURRENT_DATE}"
+
+    # Delete the snapshot taken 7 days ago
+    OLD_SNAPSHOT_DATE=$(date --date='7 days ago' "+%Y-%m-%d-%H")
+    DELETE_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${OLD_SNAPSHOT_DATE}"
+
+    # Print the delete URL for debugging
+    echo "Old-snapshot delete URL: ${DELETE_URL}"
+
+    # Delete the old snapshot with curl
+    curl -f -u ${USER}:${PASSWORD} -X DELETE "${DELETE_URL}"
+    if [ $? -eq 0 ]; then
+        echo "Deleted the snapshot from 7 days ago: ${OLD_SNAPSHOT_DATE}"
+    else
+        echo "Failed to delete the snapshot from 7 days ago: ${OLD_SNAPSHOT_DATE}"
+    fi
+else
+    echo "Snapshot failed: ${CURRENT_DATE}"
+fi
+echo "Current snapshots:"
+curl -u elastic:your_secure_password -X GET "http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/_all" | jq -r '.snapshots[].snapshot'
+
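The script assumes a snapshot repository named my_backup is already registered. A one-time registration sketch (the fs type and the location path are assumptions, and whatever path is used must be listed under path.repo in elasticsearch.yml; the credentials and ${ELASTICSEARCH_IP} are the same ones the script uses):

```
# Register a shared-filesystem snapshot repository called my_backup
curl -u elastic:your_secure_password -X PUT "http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup" \
  -H 'Content-Type: application/json' \
  -d '{ "type": "fs", "settings": { "location": "/usr/share/elasticsearch/backup" } }'
```
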
`file:`) + - id: example-with-url + location: https://some.domain/path/example4.zip + + # Or by maven coordinates: + - id: example-with-maven-url + location: org.elasticsearch.plugins:example-plugin:1.2.3 + + # A proxy can also be configured per-plugin, if necessary + - id: example-with-proxy + location: https://some.domain/path/example.zip + proxy: https://some.domain:1234 + +# Configures a proxy for all network access. Remove this if you don't need +# to use a proxy. +proxy: https://some.domain:1234 diff --git a/efk/config/elasticsearch.keystore b/efk/config/elasticsearch.keystore new file mode 100644 index 0000000..1e248cb Binary files /dev/null and b/efk/config/elasticsearch.keystore differ diff --git a/efk/config/elasticsearch.yml b/efk/config/elasticsearch.yml new file mode 100644 index 0000000..50b1547 --- /dev/null +++ b/efk/config/elasticsearch.yml @@ -0,0 +1,2 @@ +cluster.name: "docker-cluster" +network.host: 0.0.0.0 diff --git a/efk/config/jvm.options b/efk/config/jvm.options new file mode 100644 index 0000000..9354ef4 --- /dev/null +++ b/efk/config/jvm.options @@ -0,0 +1,75 @@ +################################################################ +## +## JVM configuration +## +################################################################ +## +## WARNING: DO NOT EDIT THIS FILE. If you want to override the +## JVM options in this file, or set any additional options, you +## should create one or more files in the jvm.options.d +## directory containing your adjustments. +## +## See https://www.elastic.co/guide/en/elasticsearch/reference/8.8/jvm-options.html +## for more information. +## +################################################################ + + + +################################################################ +## IMPORTANT: JVM heap size +################################################################ +## +## The heap size is automatically configured by Elasticsearch +## based on the available memory in your system and the roles +## each node is configured to fulfill. If specifying heap is +## required, it should be done through a file in jvm.options.d, +## which should be named with .options suffix, and the min and +## max should be set to the same value. For example, to set the +## heap to 4 GB, create a new file in the jvm.options.d +## directory containing these lines: +## +## -Xms4g +## -Xmx4g +## +## See https://www.elastic.co/guide/en/elasticsearch/reference/8.8/heap-size.html +## for more information +## +################################################################ + + +################################################################ +## Expert settings +################################################################ +## +## All settings below here are considered expert settings. Do +## not adjust them unless you understand what you are doing. Do +## not edit them in this file; instead, create a new file in the +## jvm.options.d directory containing your adjustments. 
+## +################################################################ + +-XX:+UseG1GC + +## JVM temporary directory +-Djava.io.tmpdir=${ES_TMPDIR} + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails; heap dumps +# are created in the working directory of the JVM unless an alternative path is +# specified +-XX:+HeapDumpOnOutOfMemoryError + +# exit right after heap dump on out of memory error +-XX:+ExitOnOutOfMemoryError + +# specify an alternative path for heap dumps; ensure the directory exists and +# has sufficient space +-XX:HeapDumpPath=data + +# specify an alternative path for JVM fatal error logs +-XX:ErrorFile=logs/hs_err_pid%p.log + +## GC logging +-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,level,pid,tags:filecount=32,filesize=64m diff --git a/efk/config/log4j2.file.properties b/efk/config/log4j2.file.properties new file mode 100644 index 0000000..5bb63ce --- /dev/null +++ b/efk/config/log4j2.file.properties @@ -0,0 +1,279 @@ +status = error + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%consoleException%n + +######## Server JSON ############################ +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json +appender.rolling.layout.type = ECSJsonLayout +appender.rolling.layout.dataset = elasticsearch.server + +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 128MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.fileIndex = nomax +appender.rolling.strategy.action.type = Delete +appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling.strategy.action.condition.type = IfFileName +appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* +appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize +appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB +################################################ +######## Server - old style pattern ########### +appender.rolling_old.type = RollingFile +appender.rolling_old.name = rolling_old +appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.rolling_old.layout.type = PatternLayout +appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling_old.policies.type = Policies +appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling_old.policies.time.interval = 1 +appender.rolling_old.policies.time.modulate = true +appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling_old.policies.size.size = 128MB +appender.rolling_old.strategy.type = DefaultRolloverStrategy 
+appender.rolling_old.strategy.fileIndex = nomax +appender.rolling_old.strategy.action.type = Delete +appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling_old.strategy.action.condition.type = IfFileName +appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* +appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize +appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB +################################################ + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling +rootLogger.appenderRef.rolling_old.ref = rolling_old + +######## Deprecation JSON ####################### +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json +appender.deprecation_rolling.layout.type = ECSJsonLayout +# Intentionally follows a different pattern to above +appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch +appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter + +appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 + +appender.header_warning.type = HeaderWarningAppender +appender.header_warning.name = header_warning +################################################# + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = WARN +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.appenderRef.header_warning.ref = header_warning +logger.deprecation.additivity = false + +######## Search slowlog JSON #################### +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ + .cluster_name}_index_search_slowlog.json +appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog + +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ + .cluster_name}_index_search_slowlog-%i.json.gz +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.size.size = 1GB +appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy +appender.index_search_slowlog_rolling.strategy.max = 4 +################################################# + +################################################# +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + 
+######## Indexing slowlog JSON ################## +appender.index_indexing_slowlog_rolling.type = RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog.json +appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog + + +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog-%i.json.gz +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.size.size = 1GB +appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy +appender.index_indexing_slowlog_rolling.strategy.max = 4 +################################################# + + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false + + +logger.org_apache_pdfbox.name = org.apache.pdfbox +logger.org_apache_pdfbox.level = off + +logger.org_apache_poi.name = org.apache.poi +logger.org_apache_poi.level = off + +logger.org_apache_fontbox.name = org.apache.fontbox +logger.org_apache_fontbox.level = off + +logger.org_apache_xmlbeans.name = org.apache.xmlbeans +logger.org_apache_xmlbeans.level = off + + +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error + +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error + + +appender.audit_rolling.type = RollingFile +appender.audit_rolling.name = audit_rolling +appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json +appender.audit_rolling.layout.type = PatternLayout +appender.audit_rolling.layout.pattern = {\ + "type":"audit", \ + "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ + %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\ + %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\ + %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ + %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ + %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ + %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ + %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ + %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ + %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ + %varsNotEmpty{, 
"user.name":"%enc{%map{user.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ + %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ + %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.roles":%map{user.roles}}\ + %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ + %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\ + %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\ + %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ + %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ + %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ + %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\ + %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ + %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ + %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ + %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ + %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ + %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ + %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ + %varsNotEmpty{, "indices":%map{indices}}\ + %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ + %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ + %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ + %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ + %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ + %varsNotEmpty{, "put":%map{put}}\ + %varsNotEmpty{, "delete":%map{delete}}\ + %varsNotEmpty{, "change":%map{change}}\ + %varsNotEmpty{, "create":%map{create}}\ + %varsNotEmpty{, "invalidate":%map{invalidate}}\ + }%n +# "node.name" node name from the `elasticsearch.yml` settings +# "node.id" node id which should not change between cluster restarts +# "host.name" unresolved hostname of the local node +# "host.ip" the local bound ip (i.e. the ip listening for connections) +# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) +# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. +# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" +# "user.name" the subject name as authenticated by a realm +# "user.run_by.name" the original authenticated subject name that is impersonating another one. +# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. 
+# "user.realm" the name of the realm that authenticated "user.name" +# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain +# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") +# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain +# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from +# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain +# "user.roles" the roles array of the user; these are the roles that are granting privileges +# "apikey.id" this field is present if and only if the "authentication.type" is "api_key" +# "apikey.name" this field is present if and only if the "authentication.type" is "api_key" +# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token +# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token +# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster +# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change" +# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node +# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated +# "realm_domain" if "realm" is under a domain, this is the name of the domain +# "url.path" the URI component between the port and the query string; it is percent (URL) encoded +# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded +# "request.method" the method of the HTTP request, i.e. 
one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT +# "request.body" the content of the request body entity, JSON escaped +# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request +# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) +# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) +# "indices" the array of indices that the "action" is acting upon +# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header +# "trace_id" an identifier conveyed by the part of "traceparent" request header +# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) +# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event +# "rule" name of the applied rule if the "origin.type" is "ip_filter" +# the "put", "delete", "change", "create", "invalidate" fields are only present +# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect + +appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}-%i.json.gz +appender.audit_rolling.policies.type = Policies +appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.audit_rolling.policies.time.interval = 1 +appender.audit_rolling.policies.time.modulate = true +appender.audit_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.audit_rolling.policies.size.size = 1GB +appender.audit_rolling.strategy.type = DefaultRolloverStrategy +appender.audit_rolling.strategy.fileIndex = nomax + +logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail +logger.xpack_security_audit_logfile.level = info +logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling +logger.xpack_security_audit_logfile.additivity = false + +logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature +logger.xmlsig.level = error +logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter +logger.samlxml_decrypt.level = fatal +logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter +logger.saml2_decrypt.level = fatal diff --git a/efk/config/log4j2.properties b/efk/config/log4j2.properties new file mode 100644 index 0000000..c0d67c8 --- /dev/null +++ b/efk/config/log4j2.properties @@ -0,0 +1,193 @@ +status = error + +######## Server JSON ############################ +appender.rolling.type = Console +appender.rolling.name = rolling +appender.rolling.layout.type = ECSJsonLayout +appender.rolling.layout.dataset = elasticsearch.server + +################################################ + +################################################ + +rootLogger.level = info +rootLogger.appenderRef.rolling.ref = rolling + +######## Deprecation JSON ####################### +appender.deprecation_rolling.type = Console +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.layout.type = ECSJsonLayout +# Intentionally follows a different pattern to above +appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch 
+appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter + +appender.header_warning.type = HeaderWarningAppender +appender.header_warning.name = header_warning +################################################# + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = WARN +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.appenderRef.header_warning.ref = header_warning +logger.deprecation.additivity = false + +######## Search slowlog JSON #################### +appender.index_search_slowlog_rolling.type = Console +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog + +################################################# + +################################################# +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +######## Indexing slowlog JSON ################## +appender.index_indexing_slowlog_rolling.type = Console +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog + +################################################# + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false + +logger.org_apache_pdfbox.name = org.apache.pdfbox +logger.org_apache_pdfbox.level = off + +logger.org_apache_poi.name = org.apache.poi +logger.org_apache_poi.level = off + +logger.org_apache_fontbox.name = org.apache.fontbox +logger.org_apache_fontbox.level = off + +logger.org_apache_xmlbeans.name = org.apache.xmlbeans +logger.org_apache_xmlbeans.level = off + +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error + +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error + +appender.audit_rolling.type = Console +appender.audit_rolling.name = audit_rolling +appender.audit_rolling.layout.type = PatternLayout +appender.audit_rolling.layout.pattern = {\ + "type":"audit", \ + "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ + %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\ + %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\ + %varsNotEmpty{, 
"node.name":"%enc{%map{node.name}}{JSON}"}\ + %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ + %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ + %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ + %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ + %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ + %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ + %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ + %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ + %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.roles":%map{user.roles}}\ + %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ + %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\ + %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\ + %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ + %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ + %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ + %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\ + %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ + %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ + %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ + %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ + %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ + %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ + %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ + %varsNotEmpty{, "indices":%map{indices}}\ + %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ + %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ + %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ + %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ + %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ + %varsNotEmpty{, "put":%map{put}}\ + %varsNotEmpty{, "delete":%map{delete}}\ + %varsNotEmpty{, "change":%map{change}}\ + %varsNotEmpty{, "create":%map{create}}\ + %varsNotEmpty{, "invalidate":%map{invalidate}}\ + }%n +# "node.name" node name from the `elasticsearch.yml` settings +# "node.id" node id which should not change between cluster restarts +# "host.name" unresolved hostname of the local node +# "host.ip" the local bound ip (i.e. the ip listening for connections) +# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) +# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. 
+# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" +# "user.name" the subject name as authenticated by a realm +# "user.run_by.name" the original authenticated subject name that is impersonating another one. +# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. +# "user.realm" the name of the realm that authenticated "user.name" +# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain +# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") +# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain +# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from +# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain +# "user.roles" the roles array of the user; these are the roles that are granting privileges +# "apikey.id" this field is present if and only if the "authentication.type" is "api_key" +# "apikey.name" this field is present if and only if the "authentication.type" is "api_key" +# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token +# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token +# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster +# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change" +# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node +# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated +# "realm_domain" if "realm" is under a domain, this is the name of the domain +# "url.path" the URI component between the port and the query string; it is percent (URL) encoded +# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded +# "request.method" the method of the HTTP request, i.e. 
one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT +# "request.body" the content of the request body entity, JSON escaped +# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request +# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) +# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) +# "indices" the array of indices that the "action" is acting upon +# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header +# "trace_id" an identifier conveyed by the part of "traceparent" request header +# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) +# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event +# "rule" name of the applied rule if the "origin.type" is "ip_filter" +# the "put", "delete", "change", "create", "invalidate" fields are only present +# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect + +logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail +logger.xpack_security_audit_logfile.level = info +logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling +logger.xpack_security_audit_logfile.additivity = false + +logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature +logger.xmlsig.level = error +logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter +logger.samlxml_decrypt.level = fatal +logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter +logger.saml2_decrypt.level = fatal \ No newline at end of file diff --git a/efk/config/role_mapping.yml b/efk/config/role_mapping.yml new file mode 100644 index 0000000..68c82f7 --- /dev/null +++ b/efk/config/role_mapping.yml @@ -0,0 +1,14 @@ +# Role mapping configuration file which has elasticsearch roles as keys +# that map to one or more user or group distinguished names + +#roleA: this is an elasticsearch role +# - groupA-DN this is a group distinguished name +# - groupB-DN +# - user1-DN this is the full user distinguished name + +#power_user: +# - "cn=admins,dc=example,dc=com" +#user: +# - "cn=users,dc=example,dc=com" +# - "cn=admins,dc=example,dc=com" +# - "cn=John Doe,cn=other users,dc=example,dc=com" diff --git a/efk/config/roles.yml b/efk/config/roles.yml new file mode 100644 index 0000000..68e003b --- /dev/null +++ b/efk/config/roles.yml @@ -0,0 +1,3 @@ +# The default roles file is empty as the preferred method of defining roles is +# through the API/UI. File based roles are useful in error scenarios when the +# API based roles may not be available. 
diff --git a/efk/config/service_tokens b/efk/config/service_tokens new file mode 100644 index 0000000..3c74311 --- /dev/null +++ b/efk/config/service_tokens @@ -0,0 +1 @@ +elastic/kibana/my-token:{PBKDF2_STRETCH}10000$hqtLfPNEvoHIGGMhgpcrdTNCaXBqHGIOAj7ndDmt8w8=$073Kw/8neGbcNJQAi37DhyEKiIvYIM4MkzvuflndCbg= diff --git a/efk/config/users b/efk/config/users new file mode 100644 index 0000000..e69de29 diff --git a/efk/config/users_roles b/efk/config/users_roles new file mode 100644 index 0000000..e69de29 diff --git a/efk/createFluentdAccoutnIn.sh b/efk/createFluentdAccoutnIn.sh new file mode 100755 index 0000000..abad64e --- /dev/null +++ b/efk/createFluentdAccoutnIn.sh @@ -0,0 +1,32 @@ +#!/bin/bash +ELASTIC_PASSWORD="your_secure_password" +ELASTIC_HOST=$(microk8s.kubectl get svc elasticsearch -n efk -o wide | awk 'NR==2 {print $3}') +echo $ES_IP + +curl -X PUT "http://${ELASTIC_HOST}:9200/_security/role/fluentd_writer" \ +-u elastic:$ELASTIC_PASSWORD \ +-H "Content-Type: application/json" \ +-d '{ + "cluster": ["monitor"], + "indices": [ + { + "names": ["logstash-*"], + "privileges": ["write", "create_index"] + } + ] + }' +echo "\n" +curl -X PUT "http://$ELASTIC_HOST:9200/_security/user/fluentd_user" \ +-u elastic:$ELASTIC_PASSWORD \ +-H "Content-Type: application/json" \ +-d '{ + "password": "fluentd_password", + "roles": ["fluentd_writer"] + }' + + +echo "\n" +curl -X GET "http://$ELASTIC_HOST:9200/_security/user/fluentd_user" \ +-u elastic:$ELASTIC_PASSWORD + +echo "\n" diff --git a/efk/createSecure_passwd_forES.sh b/efk/createSecure_passwd_forES.sh new file mode 100755 index 0000000..cbd9022 --- /dev/null +++ b/efk/createSecure_passwd_forES.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +microk8s kubectl create secret generic elasticsearch-secret -n efk \ + --from-literal=elastic_password='your_secure_password' diff --git a/efk/efk-namespace.yaml b/efk/efk-namespace.yaml new file mode 100644 index 0000000..2bdba40 --- /dev/null +++ b/efk/efk-namespace.yaml @@ -0,0 +1,5 @@ +# efk-namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: efk diff --git a/efk/elasticsearch-deployment.yaml b/efk/elasticsearch-deployment.yaml new file mode 100644 index 0000000..a99d80c --- /dev/null +++ b/efk/elasticsearch-deployment.yaml @@ -0,0 +1,46 @@ +# elasticsearch-deployment-8.8.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: elasticsearch + namespace: efk + labels: + app: elasticsearch +spec: + replicas: 1 + selector: + matchLabels: + app: elasticsearch + template: + metadata: + labels: + app: elasticsearch + spec: + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0 + ports: + - containerPort: 9200 + - containerPort: 9300 + env: + - name: discovery.type + value: "single-node" + - name: xpack.security.enabled + value: "true" + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-secret + key: elastic_password + volumeMounts: + - name: data-volume + mountPath: /usr/share/elasticsearch/data + - name: config-volume + mountPath: /usr/share/elasticsearch/config + volumes: + - name: data-volume + persistentVolumeClaim: + claimName: elasticsearch-data-pvc + - name: config-volume + persistentVolumeClaim: + claimName: elasticsearch-config-pvc diff --git a/efk/elasticsearch-ingress.yaml b/efk/elasticsearch-ingress.yaml new file mode 100644 index 0000000..a6b2572 --- /dev/null +++ b/efk/elasticsearch-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: elasticsearch + namespace: efk +spec: 
+ ingressClassName: traefik + rules: + - host: elastic.k8s.xunlang.home + http: + paths: + - backend: + service: + name: elasticsearch + port: + number: 9200 + path: / + pathType: Prefix diff --git a/efk/elasticsearch-pv.yaml b/efk/elasticsearch-pv.yaml new file mode 100644 index 0000000..c1dbfe9 --- /dev/null +++ b/efk/elasticsearch-pv.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: elasticsearch-data-pv +spec: + capacity: + storage: 300Gi + accessModes: + - ReadWriteOnce + storageClassName: microk8s-hostpath + hostPath: + path: /var/snap/microk8s/common/mnt/data/elasticsearch-data +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: elasticsearch-config-pv +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + storageClassName: microk8s-hostpath + hostPath: + path: /var/snap/microk8s/common/mnt/data/elasticsearch-config diff --git a/efk/elasticsearch-pvc.yaml b/efk/elasticsearch-pvc.yaml new file mode 100644 index 0000000..141230f --- /dev/null +++ b/efk/elasticsearch-pvc.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: elasticsearch-data-pvc + namespace: efk +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 300Gi + storageClassName: microk8s-hostpath # 确保匹配 + volumeName: elasticsearch-data-pv # 手动绑定 PV +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: elasticsearch-config-pvc + namespace: efk +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi + storageClassName: microk8s-hostpath # 确保匹配 + volumeName: elasticsearch-config-pv # 手动绑定 PV diff --git a/efk/elasticsearch-service.yaml b/efk/elasticsearch-service.yaml new file mode 100644 index 0000000..34fd91d --- /dev/null +++ b/efk/elasticsearch-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch + namespace: efk +spec: + ports: + - port: 9200 + targetPort: 9200 + selector: + app: elasticsearch + type: LoadBalancer diff --git a/efk/fluentd-configMap.yaml b/efk/fluentd-configMap.yaml new file mode 100644 index 0000000..0226110 --- /dev/null +++ b/efk/fluentd-configMap.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: fluentd-config + namespace: efk +data: + fluent.conf: | + + @type http + @id input_http + port 8888 + tag sardine.log + @label @main + + + + + + @type stdout + @id output_stdout_all + + diff --git a/efk/fluentd-daemonset.yaml b/efk/fluentd-daemonset.yaml new file mode 100644 index 0000000..249b7c4 --- /dev/null +++ b/efk/fluentd-daemonset.yaml @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd + namespace: efk + labels: + k8s-app: fluentd-logging + version: v1 +spec: + selector: + matchLabels: + k8s-app: fluentd-logging + version: v1 + template: + metadata: + labels: + k8s-app: fluentd-logging + version: v1 + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: fluentd + image: fluent/fluentd-kubernetes-daemonset:v1.17.1-debian-elasticsearch8-1.0 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: TZ + value: Asia/Shanghai # 设置时区为东八区 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: fluentd-config-volume + mountPath: /fluentd/etc/fluent.conf + subPath: fluent.conf + - name: timezone + mountPath: /etc/localtime + readOnly: true + 
terminationGracePeriodSeconds: 30 + volumes: + - name: fluentd-config-volume + configMap: + name: fluentd-config + - name: timezone + hostPath: + path: /usr/share/zoneinfo/Asia/Shanghai # 挂载东八区时区文件 + diff --git a/efk/fluentd-ingress.yaml b/efk/fluentd-ingress.yaml new file mode 100644 index 0000000..39161d1 --- /dev/null +++ b/efk/fluentd-ingress.yaml @@ -0,0 +1,28 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: fluentd-ingress + namespace: efk + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + # 添加 SSL 终止支持的注释,如果需要 TLS/SSL 支持 + # nginx.ingress.kubernetes.io/ssl-redirect: "true" +spec: + ingressClassName: traefik + rules: + - host: fluentd.k8s.xunlang.home + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: fluentd-service + port: + number: 8888 + # 如果你希望启用 TLS/SSL,需要额外的配置 + # tls: + # - hosts: + # - fluentd.k8s.xunlang.home + # secretName: fluentd-tls-secret # 这是用于TLS的secret + diff --git a/efk/fluentd-ingress2.yaml_disabled b/efk/fluentd-ingress2.yaml_disabled new file mode 100644 index 0000000..579f791 --- /dev/null +++ b/efk/fluentd-ingress2.yaml_disabled @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: fluentd-ingress + namespace: kube-system + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + # 如果需要 TLS/SSL 支持,可以开启如下注释 + # nginx.ingress.kubernetes.io/ssl-redirect: "true" +spec: + rules: + - host: fluentd.k8s.xunlang.home + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: fluentd-service # 你的 Fluentd Service + port: + number: 8888 # Fluentd 容器内部的端口 + diff --git a/efk/fluentd-service.yaml b/efk/fluentd-service.yaml new file mode 100644 index 0000000..6481892 --- /dev/null +++ b/efk/fluentd-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: fluentd-service + namespace: efk +spec: + type: LoadBalancer + selector: + k8s-app: fluentd-logging + ports: + - protocol: TCP + port: 8888 # Service 暴露的端口 + targetPort: 8888 # Fluentd 容器内部的端口 + diff --git a/efk/fluentd.conf b/efk/fluentd.conf new file mode 100644 index 0000000..5f8fd5e --- /dev/null +++ b/efk/fluentd.conf @@ -0,0 +1,20 @@ + + @type http + @id input_http + port 8888 + tag sardine.log + + + + @type elasticsearch + @id output_elasticsearch + host elasticsearch + port 9200 + scheme http + user fluentd_user + password fluentd_password + logstash_format true + logstash_prefix logstash + logstash_dateformat %Y.%m.%d + + diff --git a/efk/fluentdLogs.sh b/efk/fluentdLogs.sh new file mode 100755 index 0000000..7b1201f --- /dev/null +++ b/efk/fluentdLogs.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +FLUENTD_POD=$(microk8s.kubectl get pods -n efk | grep fluentd | awk '{print $1}') +echo "new pod is: " ${FLUENTD_POD} +microk8s.kubectl logs -f pod/${FLUENTD_POD} -n efk diff --git a/efk/getConfigFlentd.sh b/efk/getConfigFlentd.sh new file mode 100755 index 0000000..62b6e0e --- /dev/null +++ b/efk/getConfigFlentd.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +microk8s.kubectl describe configMap fluentd-config -n efk diff --git a/efk/init.sh b/efk/init.sh new file mode 100644 index 0000000..8e599c3 --- /dev/null +++ b/efk/init.sh @@ -0,0 +1,33 @@ +cd /home/ubuntu/k8sCongfigs/efk +sudo su +// 准备 es 配置 +mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-data -p || true +mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-config -p || true +cp config/* /var/snap/microk8s/common/mnt/data/elasticsearch-config -r +// 创建 es 资源 +microk8s.kubectl apply -f efk-namespace.yaml +microk8s.kubectl apply -f 
elasticsearch-deployment.yaml +microk8s.kubectl apply -f elasticsearch-ingress.yaml +microk8s.kubectl apply -f elasticsearch-pv.yaml +microk8s.kubectl apply -f elasticsearch-pvc.yaml +microk8s.kubectl apply -f elasticsearch-service.yaml +// 这个时候正在创建elasticsearch的pod,需要拉取镜像,大概1个多小时,如果有离线的直接导入离线的镜像 +sleep 3600 +./createSecure_passwd_forES.sh + +microk8s.kubectl apply -f fluentd-configMap.yaml +microk8s.kubectl apply -f fluentd-daemonset.yaml +microk8s.kubectl apply -f fluentd-service.yaml +microk8s.kubectl apply -f fluentd-ingress.yaml +microk8s.kubectl apply -f fluentd-service.yaml + +microk8s.kubectl apply -f kibana-deployment.yaml +microk8s.kubectl apply -f kibana-ingress.yaml +microk8s.kubectl apply -f kibana-service.yaml + + + + + + + diff --git a/efk/kibana-configMap.yaml_disabled b/efk/kibana-configMap.yaml_disabled new file mode 100644 index 0000000..06006ec --- /dev/null +++ b/efk/kibana-configMap.yaml_disabled @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kibana-config +data: + kibana.yml: | + server.host: "0.0.0.0" + elasticsearch.hosts: ["http://elasticsearch:9200"] + # Add other configurations here + diff --git a/efk/kibana-deployment.yaml b/efk/kibana-deployment.yaml new file mode 100644 index 0000000..a65dbcc --- /dev/null +++ b/efk/kibana-deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana + namespace: efk + labels: + app: kibana +spec: + replicas: 1 + selector: + matchLabels: + app: kibana + template: + metadata: + labels: + app: kibana + spec: + containers: + - name: kibana + image: docker.elastic.co/kibana/kibana:8.8.0 + ports: + - containerPort: 5601 + env: + - name: ELASTICSEARCH_HOSTS + value: "http://elasticsearch.efk.svc.cluster.local:9200" + - name: XPACK_SECURITY_ENCRYPTIONKEY + value: "a_secure_random_string_of_32_characters" + - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY + value: "another_secure_random_string_of_32_characters" + - name: XPACK_REPORTING_ENCRYPTIONKEY + value: "yet_another_secure_random_string_of_32_characters" + - name: ELASTICSEARCH_SERVICEACCOUNTTOKEN + value: "AAEAAWVsYXN0aWMva2liYW5hL215LXRva2VuOlZycER6YktPU2Qtb0xXM1hHQ2ZtUnc" diff --git a/efk/kibana-deployment.yaml_tmp b/efk/kibana-deployment.yaml_tmp new file mode 100644 index 0000000..bdca028 --- /dev/null +++ b/efk/kibana-deployment.yaml_tmp @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana + namespace: efk + labels: + app: kibana +spec: + replicas: 1 + selector: + matchLabels: + app: kibana + template: + metadata: + labels: + app: kibana + spec: + containers: + - name: kibana + image: docker.elastic.co/kibana/kibana:8.8.0 + ports: + - containerPort: 5601 + env: + - name: ELASTICSEARCH_HOSTS + value: "http://elasticsearch.efk.svc.cluster.local:9200" + - name: XPACK_SECURITY_ENCRYPTIONKEY + value: "a_secure_random_string_of_32_characters" + - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY + value: "another_secure_random_string_of_32_characters" + - name: XPACK_REPORTING_ENCRYPTIONKEY + value: "yet_another_secure_random_string_of_32_characters" + - name: ELASTICSEARCH_SERVICEACCOUNTTOKEN + value: "##TOKEN##" diff --git a/efk/kibana-ingress.yaml b/efk/kibana-ingress.yaml new file mode 100644 index 0000000..8265b2a --- /dev/null +++ b/efk/kibana-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kibana + namespace: efk +spec: + ingressClassName: traefik + rules: + - host: kibana.k8s.xunlang.home + http: + paths: + - backend: + service: + name: 
kibana + port: + number: 5601 + path: / + pathType: Prefix diff --git a/efk/kibana-persistent.yaml_disabled b/efk/kibana-persistent.yaml_disabled new file mode 100644 index 0000000..148e119 --- /dev/null +++ b/efk/kibana-persistent.yaml_disabled @@ -0,0 +1,25 @@ +# PersistentVolume 配置 +apiVersion: v1 +kind: PersistentVolume +metadata: + name: kibana-config-pv +spec: + capacity: + storage: 1Gi # 这里用 capacity 指定存储大小 + accessModes: + - ReadWriteOnce + hostPath: + path: /var/snap/microk8s/common/mnt/data/kibana # 本地路径 +--- +# PersistentVolumeClaim 配置 +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kibana-config-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi # 这里使用 resources 请求存储大小 + diff --git a/efk/kibana-service.yaml b/efk/kibana-service.yaml new file mode 100644 index 0000000..24ff26f --- /dev/null +++ b/efk/kibana-service.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: kibana + namespace: efk +spec: + ports: + - port: 5601 + targetPort: 5601 + selector: + app: kibana + type: LoadBalancer diff --git a/efk/refreshTokenForKibana.sh b/efk/refreshTokenForKibana.sh new file mode 100755 index 0000000..31c0af4 --- /dev/null +++ b/efk/refreshTokenForKibana.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# 获取 elasticsearch 实例的 Pod 名称 +ES_POD=$(microk8s.kubectl get pods -n efk -l app=elasticsearch -o jsonpath='{.items[0].metadata.name}') +echo "ES_POD:" ${ES_POD} + +# 进入 Elasticsearch Pod 并生成服务账号 token +microk8s.kubectl exec -n efk $ES_POD -- bin/elasticsearch-service-tokens delete elastic/kibana my-token +TOKEN=$(microk8s.kubectl exec -n efk $ES_POD -- bin/elasticsearch-service-tokens create elastic/kibana my-token | grep 'SERVICE_TOKEN' | awk '{print $NF}') + +echo "new TOKEN:" ${TOKEN} + + +microk8s.kubectl delete deployment kibana -n efk +# 更新 Kibana Deployment YAML 文件 +cd $(pwd) +# 将 token 设置为环境变量 +sed "s/##TOKEN##/$TOKEN/" kibana-deployment.yaml_tmp > kibana-deployment.yaml +# 删除并重新应用 Kibana Deployment +microk8s.kubectl apply -f kibana-deployment.yaml + diff --git a/efk/updateFluentdConfigMap.sh b/efk/updateFluentdConfigMap.sh new file mode 100755 index 0000000..5aee016 --- /dev/null +++ b/efk/updateFluentdConfigMap.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +microk8s.kubectl delete configMap fluentd-config -n efk +microk8s.kubectl apply -f fluentd-configMap.yaml +microk8s.kubectl delete daemonSet fluentd -n efk +microk8s.kubectl apply -f fluentd-daemonset.yaml +sleep 5 +FLUENTD_POD=$(microk8s.kubectl get pods -n efk | grep fluentd | awk '{print $1}') +echo "new pod is: " ${FLUENTD_POD} +microk8s.kubectl logs pod/${FLUENTD_POD} -n efk +sleep 2 +exit + diff --git a/efk2/README.md b/efk2/README.md new file mode 100644 index 0000000..4774604 --- /dev/null +++ b/efk2/README.md @@ -0,0 +1,17 @@ +ekf空间下资源部署完毕后执行这个 + +``` +./createSecure_passwd.sh +``` + +创建了 +账号:elastic +密码:your_secure_password + +然后创建服务账号给kibana, 并重新部署 + +``` +./refreshToken.sh +``` + +浏览:http://kibana.k8s.xunlang.home diff --git a/efk2/aa.txt b/efk2/aa.txt new file mode 100644 index 0000000..53bd39d --- /dev/null +++ b/efk2/aa.txt @@ -0,0 +1,16 @@ + + @type http + @id input_http + port 8888 + tag sardine.log + + + @type elasticsearch + @id output_elasticsearch + host elasticsearch + port 9200 + scheme http + user fluentd_user + password fluentd_password + index_name logstash-sardine-%Y.%m.%d + diff --git a/efk2/aa.yaml b/efk2/aa.yaml new file mode 100644 index 0000000..72a98f5 --- /dev/null +++ b/efk2/aa.yaml @@ -0,0 +1,11 @@ +apiVersion: kibana.k8s.elastic.co/v1 +kind: 
Secret +metadata: + name: kibana-sample +spec: + version: 8.15.1 + count: 3 + elasticsearchRef: + name: "elasticsearch-sample" + secureSettings: + - secretName: kibana-secret-settings diff --git a/efk2/createSecure_passwd.sh b/efk2/createSecure_passwd.sh new file mode 100755 index 0000000..cbd9022 --- /dev/null +++ b/efk2/createSecure_passwd.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +microk8s kubectl create secret generic elasticsearch-secret -n efk \ + --from-literal=elastic_password='your_secure_password' diff --git a/efk2/efk-namespace.yaml b/efk2/efk-namespace.yaml new file mode 100644 index 0000000..2bdba40 --- /dev/null +++ b/efk2/efk-namespace.yaml @@ -0,0 +1,5 @@ +# efk-namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: efk diff --git a/efk2/elasticsearch-deployment.yaml b/efk2/elasticsearch-deployment.yaml new file mode 100644 index 0000000..a99d80c --- /dev/null +++ b/efk2/elasticsearch-deployment.yaml @@ -0,0 +1,46 @@ +# elasticsearch-deployment-8.8.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: elasticsearch + namespace: efk + labels: + app: elasticsearch +spec: + replicas: 1 + selector: + matchLabels: + app: elasticsearch + template: + metadata: + labels: + app: elasticsearch + spec: + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0 + ports: + - containerPort: 9200 + - containerPort: 9300 + env: + - name: discovery.type + value: "single-node" + - name: xpack.security.enabled + value: "true" + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-secret + key: elastic_password + volumeMounts: + - name: data-volume + mountPath: /usr/share/elasticsearch/data + - name: config-volume + mountPath: /usr/share/elasticsearch/config + volumes: + - name: data-volume + persistentVolumeClaim: + claimName: elasticsearch-data-pvc + - name: config-volume + persistentVolumeClaim: + claimName: elasticsearch-config-pvc diff --git a/efk2/elasticsearch-ingress.yaml b/efk2/elasticsearch-ingress.yaml new file mode 100644 index 0000000..a6b2572 --- /dev/null +++ b/efk2/elasticsearch-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: elasticsearch + namespace: efk +spec: + ingressClassName: traefik + rules: + - host: elastic.k8s.xunlang.home + http: + paths: + - backend: + service: + name: elasticsearch + port: + number: 9200 + path: / + pathType: Prefix diff --git a/efk2/elasticsearch-pv.yaml b/efk2/elasticsearch-pv.yaml new file mode 100644 index 0000000..e6a3542 --- /dev/null +++ b/efk2/elasticsearch-pv.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: elasticsearch-data-pv + namespace: efk +spec: + capacity: + storage: 300Gi + accessModes: + - ReadWriteOnce + storageClassName: microk8s-hostpath # 确保与 PVC 的 StorageClass 匹配 + hostPath: + path: /var/snap/microk8s/common/mnt/data/elasticsearch-data +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: elasticsearch-config-pv + namespace: efk +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + storageClassName: microk8s-hostpath # 确保与 PVC 的 StorageClass 匹配 + hostPath: + path: /var/snap/microk8s/common/mnt/data/elasticsearch-config + diff --git a/efk2/elasticsearch-pvc.yaml b/efk2/elasticsearch-pvc.yaml new file mode 100644 index 0000000..ab21c3d --- /dev/null +++ b/efk2/elasticsearch-pvc.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: elasticsearch-data-pvc + namespace: efk +spec: + accessModes: + - ReadWriteOnce + 
resources: + requests: + storage: 300Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: elasticsearch-config-pvc + namespace: efk +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi + diff --git a/efk2/elasticsearch-service.yaml b/efk2/elasticsearch-service.yaml new file mode 100644 index 0000000..34fd91d --- /dev/null +++ b/efk2/elasticsearch-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch + namespace: efk +spec: + ports: + - port: 9200 + targetPort: 9200 + selector: + app: elasticsearch + type: LoadBalancer diff --git a/efk2/fluentd-configMap.yaml b/efk2/fluentd-configMap.yaml new file mode 100644 index 0000000..5437f61 --- /dev/null +++ b/efk2/fluentd-configMap.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: fluentd-config + namespace: efk +data: + fluent.conf: | + + @type http + @id input_http + port 8888 + tag sardine.log + @label @main + + + + + + @type stdout + @id output_stdout_all + + diff --git a/efk2/fluentd-daemonset.yaml b/efk2/fluentd-daemonset.yaml new file mode 100644 index 0000000..cdd91eb --- /dev/null +++ b/efk2/fluentd-daemonset.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd + namespace: efk + labels: + k8s-app: fluentd-logging + version: v1 +spec: + selector: + matchLabels: + k8s-app: fluentd-logging + version: v1 + template: + metadata: + labels: + k8s-app: fluentd-logging + version: v1 + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: fluentd + image: fluent/fluentd-kubernetes-daemonset:v1.17.1-debian-elasticsearch8-1.0 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: fluentd-config-volume + mountPath: /fluentd/etc/fluent.conf + subPath: fluent.conf + terminationGracePeriodSeconds: 30 + volumes: + - name: fluentd-config-volume + configMap: + name: fluentd-config + diff --git a/efk2/fluentd-ingress.yaml b/efk2/fluentd-ingress.yaml new file mode 100644 index 0000000..39161d1 --- /dev/null +++ b/efk2/fluentd-ingress.yaml @@ -0,0 +1,28 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: fluentd-ingress + namespace: efk + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + # 添加 SSL 终止支持的注释,如果需要 TLS/SSL 支持 + # nginx.ingress.kubernetes.io/ssl-redirect: "true" +spec: + ingressClassName: traefik + rules: + - host: fluentd.k8s.xunlang.home + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: fluentd-service + port: + number: 8888 + # 如果你希望启用 TLS/SSL,需要额外的配置 + # tls: + # - hosts: + # - fluentd.k8s.xunlang.home + # secretName: fluentd-tls-secret # 这是用于TLS的secret + diff --git a/efk2/fluentd-ingress2.yaml b/efk2/fluentd-ingress2.yaml new file mode 100644 index 0000000..579f791 --- /dev/null +++ b/efk2/fluentd-ingress2.yaml @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: fluentd-ingress + namespace: kube-system + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + # 如果需要 TLS/SSL 支持,可以开启如下注释 + # nginx.ingress.kubernetes.io/ssl-redirect: "true" +spec: + rules: + - host: fluentd.k8s.xunlang.home + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: fluentd-service # 你的 Fluentd Service + port: + number: 8888 # Fluentd 容器内部的端口 + diff --git 
diff --git a/efk2/fluentd-service.yaml b/efk2/fluentd-service.yaml new file mode 100644 index 0000000..6481892 --- /dev/null +++ b/efk2/fluentd-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: fluentd-service + namespace: efk +spec: + type: LoadBalancer + selector: + k8s-app: fluentd-logging + ports: + - protocol: TCP + port: 8888 # the port exposed by the Service + targetPort: 8888 # the port inside the Fluentd container + diff --git a/efk2/fluentd.conf b/efk2/fluentd.conf new file mode 120000 index 0000000..2856c15 --- /dev/null +++ b/efk2/fluentd.conf @@ -0,0 +1 @@ +/var/snap/microk8s/common/mnt/config/fluentd/fluentd.conf \ No newline at end of file diff --git a/efk2/fluentd.tar b/efk2/fluentd.tar new file mode 100644 index 0000000..c13e6e0 Binary files /dev/null and b/efk2/fluentd.tar differ diff --git a/efk2/fluentdLogs.sh b/efk2/fluentdLogs.sh new file mode 100755 index 0000000..7b1201f --- /dev/null +++ b/efk2/fluentdLogs.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +FLUENTD_POD=$(microk8s.kubectl get pods -n efk | grep fluentd | awk '{print $1}') +echo "new pod is: " ${FLUENTD_POD} +microk8s.kubectl logs -f pod/${FLUENTD_POD} -n efk diff --git a/efk2/getConfigFlentd.sh b/efk2/getConfigFlentd.sh new file mode 100755 index 0000000..62b6e0e --- /dev/null +++ b/efk2/getConfigFlentd.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +microk8s.kubectl describe configMap fluentd-config -n efk diff --git a/efk2/kibana-configMap.yaml_disabled b/efk2/kibana-configMap.yaml_disabled new file mode 100644 index 0000000..06006ec --- /dev/null +++ b/efk2/kibana-configMap.yaml_disabled @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kibana-config +data: + kibana.yml: | + server.host: "0.0.0.0" + elasticsearch.hosts: ["http://elasticsearch:9200"] + # Add other configurations here + diff --git a/efk2/kibana-deployment.yaml b/efk2/kibana-deployment.yaml new file mode 100644 index 0000000..f876f52 --- /dev/null +++ b/efk2/kibana-deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana + namespace: efk + labels: + app: kibana +spec: + replicas: 1 + selector: + matchLabels: + app: kibana + template: + metadata: + labels: + app: kibana + spec: + containers: + - name: kibana + image: docker.elastic.co/kibana/kibana:8.8.0 + ports: + - containerPort: 5601 + env: + - name: ELASTICSEARCH_HOSTS + value: "http://elasticsearch.efk.svc.cluster.local:9200" + - name: XPACK_SECURITY_ENCRYPTIONKEY + value: "a_secure_random_string_of_32_characters" + - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY + value: "another_secure_random_string_of_32_characters" + - name: XPACK_REPORTING_ENCRYPTIONKEY + value: "yet_another_secure_random_string_of_32_characters" + - name: ELASTICSEARCH_SERVICEACCOUNTTOKEN + value: "AAEAAWVsYXN0aWMva2liYW5hL215LXRva2VuOjFFSUp0anRiU05XYWpBSjZwaURIb0E"
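The three XPACK_* keys only need to be any stable 32-character strings; one way to generate them (a suggestion, not something the repo prescribes) instead of committing the placeholder values above:

```
# each call prints 32 hex characters; run once per XPACK_* key
openssl rand -hex 16
```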
diff --git a/efk2/kibana-deployment.yaml_tmp b/efk2/kibana-deployment.yaml_tmp new file mode 100644 index 0000000..bdca028 --- /dev/null +++ b/efk2/kibana-deployment.yaml_tmp @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana + namespace: efk + labels: + app: kibana +spec: + replicas: 1 + selector: + matchLabels: + app: kibana + template: + metadata: + labels: + app: kibana + spec: + containers: + - name: kibana + image: docker.elastic.co/kibana/kibana:8.8.0 + ports: + - containerPort: 5601 + env: + - name: ELASTICSEARCH_HOSTS + value: "http://elasticsearch.efk.svc.cluster.local:9200" + - name: XPACK_SECURITY_ENCRYPTIONKEY + value: "a_secure_random_string_of_32_characters" + - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY + value: "another_secure_random_string_of_32_characters" + - name: XPACK_REPORTING_ENCRYPTIONKEY + value: "yet_another_secure_random_string_of_32_characters" + - name: ELASTICSEARCH_SERVICEACCOUNTTOKEN + value: "##TOKEN##" diff --git a/efk2/kibana-ingress.yaml b/efk2/kibana-ingress.yaml new file mode 100644 index 0000000..8265b2a --- /dev/null +++ b/efk2/kibana-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kibana + namespace: efk +spec: + ingressClassName: traefik + rules: + - host: kibana.k8s.xunlang.home + http: + paths: + - backend: + service: + name: kibana + port: + number: 5601 + path: / + pathType: Prefix diff --git a/efk2/kibana-persistent.yaml_disabled b/efk2/kibana-persistent.yaml_disabled new file mode 100644 index 0000000..148e119 --- /dev/null +++ b/efk2/kibana-persistent.yaml_disabled @@ -0,0 +1,25 @@ +# PersistentVolume configuration +apiVersion: v1 +kind: PersistentVolume +metadata: + name: kibana-config-pv +spec: + capacity: + storage: 1Gi # capacity sets the storage size here + accessModes: + - ReadWriteOnce + hostPath: + path: /var/snap/microk8s/common/mnt/data/kibana # local path +--- +# PersistentVolumeClaim configuration +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kibana-config-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi # resources.requests sets the storage size here + diff --git a/efk2/kibana-service.yaml b/efk2/kibana-service.yaml new file mode 100644 index 0000000..24ff26f --- /dev/null +++ b/efk2/kibana-service.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: kibana + namespace: efk +spec: + ports: + - port: 5601 + targetPort: 5601 + selector: + app: kibana + type: LoadBalancer diff --git a/efk2/kk.sh b/efk2/kk.sh new file mode 100755 index 0000000..c1d4da5 --- /dev/null +++ b/efk2/kk.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +microk8s.kubectl create configmap fluentd-config --from-literal=fluent.conf=' + <source> + @type http + @id input_http + port 8888 + tag sardine.log + </source> + + <match **> + @type elasticsearch + @id output_elasticsearch + host elasticsearch + port 9200 + scheme http + user fluentd_user + password fluentd_password + logstash_format true + </match> +' -n efk + diff --git a/efk2/refreshTokenForKibana.sh b/efk2/refreshTokenForKibana.sh new file mode 100755 index 0000000..31c0af4 --- /dev/null +++ b/efk2/refreshTokenForKibana.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# get the Pod name of the elasticsearch instance +ES_POD=$(microk8s.kubectl get pods -n efk -l app=elasticsearch -o jsonpath='{.items[0].metadata.name}') +echo "ES_POD:" ${ES_POD} + +# exec into the Elasticsearch Pod and generate a service account token +microk8s.kubectl exec -n efk $ES_POD -- bin/elasticsearch-service-tokens delete elastic/kibana my-token +TOKEN=$(microk8s.kubectl exec -n efk $ES_POD -- bin/elasticsearch-service-tokens create elastic/kibana my-token | grep 'SERVICE_TOKEN' | awk '{print $NF}') + +echo "new TOKEN:" ${TOKEN} + + +microk8s.kubectl delete deployment kibana -n efk +# update the Kibana Deployment YAML file +cd $(pwd) +# substitute the token into the environment variable +sed "s/##TOKEN##/$TOKEN/" kibana-deployment.yaml_tmp > kibana-deployment.yaml +# re-apply the Kibana Deployment (it was deleted above) +microk8s.kubectl apply -f kibana-deployment.yaml +
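One way to confirm the freshly minted service token actually authenticates, using the elastic.k8s.xunlang.home host from elasticsearch-ingress.yaml (a sketch; adjust scheme/host if your cluster serves Elasticsearch over TLS or you query the service directly):

```
# should return the kibana service account's identity as JSON
curl -H "Authorization: Bearer $TOKEN" \
  http://elastic.k8s.xunlang.home/_security/_authenticate
```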
diff --git a/efk2/updateFluentdConfigMap.sh b/efk2/updateFluentdConfigMap.sh new file mode 100755 index 0000000..5aee016 --- /dev/null +++ b/efk2/updateFluentdConfigMap.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +microk8s.kubectl delete configMap fluentd-config -n efk +microk8s.kubectl apply -f fluentd-configMap.yaml +microk8s.kubectl delete daemonSet fluentd -n efk +microk8s.kubectl apply -f fluentd-daemonset.yaml +sleep 5 +FLUENTD_POD=$(microk8s.kubectl get pods -n efk | grep fluentd | awk '{print $1}') +echo "new pod is: " ${FLUENTD_POD} +microk8s.kubectl logs pod/${FLUENTD_POD} -n efk +sleep 2 +exit + diff --git a/helloWorld/helloWorld.yaml b/helloWorld/helloWorld.yaml new file mode 100644 index 0000000..2ec5813 --- /dev/null +++ b/helloWorld/helloWorld.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld +spec: + selector: + matchLabels: + app: helloworld + replicas: 1 # tells deployment to run 1 pod matching the template + template: # create pods using pod definition in this template + metadata: + labels: + app: helloworld + spec: + containers: + - name: helloworld + image: karthequian/helloworld:latest + ports: + - containerPort: 80 diff --git a/helloWorld/helloWorld_ingress.yaml b/helloWorld/helloWorld_ingress.yaml new file mode 100644 index 0000000..0138884 --- /dev/null +++ b/helloWorld/helloWorld_ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: helloworld + namespace: default +spec: + ingressClassName: traefik + rules: + - host: cluster0.k8s.helloworld-service.stage.extra + http: + paths: + - backend: + service: + name: helloworld-service + port: + number: 80 + path: / + pathType: Prefix diff --git a/helloWorld/helloWorld_service.yaml b/helloWorld/helloWorld_service.yaml new file mode 100644 index 0000000..d625b14 --- /dev/null +++ b/helloWorld/helloWorld_service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: helloworld-service +spec: + selector: + app: helloworld + ports: + - protocol: TCP + port: 80 + targetPort: 80 + type: LoadBalancer diff --git a/traefik/traefik-ClusterRole.yaml b/traefik/traefik-ClusterRole.yaml new file mode 100644 index 0000000..d667495 --- /dev/null +++ b/traefik/traefik-ClusterRole.yaml @@ -0,0 +1,39 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: traefik-ingress-controller +rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses + - ingressclasses + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: traefik-ingress-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: traefik-ingress-controller +subjects: + - kind: ServiceAccount + name: traefik + namespace: traefik + diff --git a/traefik/traefik_role.yaml b/traefik/traefik_role.yaml new file mode 100644 index 0000000..ab26260 --- /dev/null +++ b/traefik/traefik_role.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: traefik-ingress-role + namespace: default +rules: +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch diff --git a/traefik/traefik_roleBind.yaml b/traefik/traefik_roleBind.yaml new file mode 100644 index 0000000..894cbe7 --- /dev/null +++ b/traefik/traefik_roleBind.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: traefik-ingress-role-binding + namespace: default +subjects: +- kind: ServiceAccount + name: traefik-ingress + namespace: default +roleRef: + kind: Role + name: traefik-ingress-role + apiGroup: rbac.authorization.k8s.io
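The data.password field in the vnc-secret.yaml below is base64-encoded; a value can be produced like this (YourVncPassword is a placeholder, pick your own):

```
# -n matters: a trailing newline would become part of the stored password
echo -n 'YourVncPassword' | base64
```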
diff --git a/vnc-desktop/vnc-deployment.yaml b/vnc-desktop/vnc-deployment.yaml new file mode 100644 index 0000000..e052b42 --- /dev/null +++ b/vnc-desktop/vnc-deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vnc-desktop + namespace: default + labels: + app: vnc-desktop +spec: + replicas: 1 + selector: + matchLabels: + app: vnc-desktop + template: + metadata: + labels: + app: vnc-desktop + spec: + containers: + - name: vnc-desktop + image: sappine/arm64-vnc-desktop:latest + ports: + - containerPort: 5901 + env: + - name: VNC_PASSWORD + valueFrom: + secretKeyRef: + name: vnc-secret + key: password + diff --git a/vnc-desktop/vnc-secret.yaml b/vnc-desktop/vnc-secret.yaml new file mode 100644 index 0000000..977493f --- /dev/null +++ b/vnc-desktop/vnc-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: vnc-secret + namespace: default +type: Opaque +data: + password: QWJjZEAxMjM0 + diff --git a/vnc-desktop/vnc-service.yaml b/vnc-desktop/vnc-service.yaml new file mode 100644 index 0000000..07ee604 --- /dev/null +++ b/vnc-desktop/vnc-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: vnc-service + namespace: default +spec: + type: LoadBalancer + ports: + - port: 5901 + targetPort: 5901 + protocol: TCP + selector: + app: vnc-desktop + diff --git a/whoami/whoami.tar b/whoami/whoami.tar new file mode 100644 index 0000000..1650f1c Binary files /dev/null and b/whoami/whoami.tar differ diff --git a/whoami/whoami_deployment.yaml b/whoami/whoami_deployment.yaml new file mode 100644 index 0000000..ac0a6c3 --- /dev/null +++ b/whoami/whoami_deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: whoami-deployment +spec: + replicas: 3 + selector: + matchLabels: + app: whoami + template: + metadata: + labels: + app: whoami + spec: + containers: + - name: whoami + image: traefik/whoami:latest + ports: + - containerPort: 80 + diff --git a/whoami/whoami_ingress.yaml b/whoami/whoami_ingress.yaml new file mode 100644 index 0000000..e5dd178 --- /dev/null +++ b/whoami/whoami_ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: whoami + namespace: default +spec: + ingressClassName: traefik + rules: + - host: whoami.k8s.xunlang.home + http: + paths: + - backend: + service: + name: whoami-service + port: + number: 80 + path: / + pathType: Prefix diff --git a/whoami/whoami_service.yaml b/whoami/whoami_service.yaml new file mode 100644 index 0000000..ad92da3 --- /dev/null +++ b/whoami/whoami_service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: whoami-service +spec: + selector: + app: whoami + ports: + - protocol: TCP + port: 80 + targetPort: 80 + type: LoadBalancer
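Once the whoami manifests are applied, a quick end-to-end check through traefik (assuming whoami.tar is a docker save of the traefik/whoami image, for nodes without registry access):

```
# optional: load the saved image straight into microk8s' containerd
microk8s ctr image import whoami/whoami.tar
# the response echoes request headers if ingress -> service -> pod all work
curl http://whoami.k8s.xunlang.home/
```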