diff --git a/efk/init.sh b/efk/init.sh
index 210f4b0..667eb4a 100644
--- a/efk/init.sh
+++ b/efk/init.sh
@@ -1,11 +1,9 @@
-
-
cd /home/ubuntu/k8sCongfigs/traefik
microk8s.kubectl apply -f traefik_role.yaml
microk8s.kubectl apply -f traefik_roleBind.yaml
-cd /home/ubuntu/k8sCongfigs/efk
+cd /home/ubuntu/k8sCongfigs/efk
sudo su
# Prepare the ES config
mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-data -p || true
@@ -14,6 +12,7 @@ cp config/* /var/snap/microk8s/common/mnt/data/elasticsearch-config -r
microk8s.ctr image import ~/shared/powerssd/images/docker/x86/fluentd-x86-image.tar
microk8s.ctr image import ~/shared/powerssd/images/docker/x86/elasticsearch-8-8-0.tar
+microk8s.ctr image import ~/shared/powerssd/images/docker/x86/kibana-8-8-0.tar
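+# Optional sanity check: confirm the imports landed, e.g. microk8s.ctr images ls | grep -E 'fluentd|elasticsearch|kibana'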
# Create the ES resources
microk8s.kubectl apply -f efk-namespace.yaml
@@ -22,7 +21,7 @@ microk8s.kubectl apply -f elasticsearch-ingress.yaml
microk8s.kubectl apply -f elasticsearch-pv.yaml
microk8s.kubectl apply -f elasticsearch-pvc.yaml
microk8s.kubectl apply -f elasticsearch-service.yaml
-# At this point the elasticsearch pod is being created and has to pull its image, which can take over an hour; if an offline image is available, import it directly
+# At this point the elasticsearch pod is being created and has to pull its image, which can take over an hour; if an offline image is available, import it directly
sleep 60
./createSecure_passwd_forES.sh
./createFluentdAccoutnIn.sh
@@ -38,10 +37,3 @@ microk8s.kubectl apply -f kibana-ingress.yaml
microk8s.kubectl apply -f kibana-service.yaml
./refreshTokenForKibana.sh
-
-
-
-
-
-
-
diff --git a/efk_cluster/README.md b/efk_cluster/README.md
new file mode 100644
index 0000000..f19dca0
--- /dev/null
+++ b/efk_cluster/README.md
@@ -0,0 +1,73 @@
+## How to recreate the entire namespace
+
+```
+./init.sh
+```
+
+You can skip everything below; just run the command above.
+
+First delete the namespace efk, so that everything inside it is gone except the PVs,
+then run microk8s.kubectl create namespace efk
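+
+For example:
+
+```
+microk8s.kubectl delete namespace efk
+microk8s.kubectl get pv    # the PVs survive the namespace deletion
+microk8s.kubectl create namespace efk
+```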
+
+
+option1:
+1. Make sure the contents of config/ in this directory have been copied to /var/snap/microk8s/common/mnt/data/elasticsearch-config
+  - sudo mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-data -p || true
+  - sudo mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-config -p || true
+2. Deploy the resources in the efk namespace: every yaml in this directory that is not disabled
+  - chmod 777 /var/snap/microk8s/common/mnt/data/elasticsearch-config && chmod 777 /var/snap/microk8s/common/mnt/data/elasticsearch-data
+  - cp config/* /var/snap/microk8s/common/mnt/data/elasticsearch-config -r
+  - Install the elasticsearch-related yamls first (see the sketch after this list)
+  - Run ./createSecure_passwd_forES.sh
+    At this point it is worth pausing to check that the elasticsearch pod is healthy, otherwise the remaining steps are pointless: microk8s.kubectl get all -n efk
+  - Then install the fluentd-related yamls
+  - Run ./createFluentdAccoutnIn.sh
+  - Finally install the kibana-related yamls
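+
+The apply order is the same as in init.sh; roughly:
+
+```
+microk8s.kubectl apply -f efk-namespace.yaml
+microk8s.kubectl apply -f elasticsearch-ingress.yaml
+microk8s.kubectl apply -f elasticsearch-pv.yaml
+microk8s.kubectl apply -f elasticsearch-pvc.yaml
+microk8s.kubectl apply -f elasticsearch-service.yaml
+./createSecure_passwd_forES.sh
+./createFluentdAccoutnIn.sh
+# ...then the fluentd and kibana yamls, and ./refreshTokenForKibana.sh
+```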
+
+--------
+
+option2:
+
+1. After all the yamls have been applied, run this:
+
+```
+./createSecure_passwd_forES.sh
+```
+
+This creates
+account: elastic
+password: your_secure_password
+
+2. Then create the service account for kibana, and redeploy
+
+```
+./refreshTokenForKibana.sh
+```
+3. Make sure the latest traefik loadbalancer IP has been configured in the upstream block of /etc/nginx/nginx.conf
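+
+For reference, a minimal sketch of such an upstream block (the upstream name and IP are placeholders; use the actual traefik LoadBalancer IP):
+
+```
+upstream kibana_backend {
+    server 10.64.140.43:80;  # placeholder IP
+}
+```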
+--------------
+Pick one of the two options above.
+Either way, at the end, browse: http://kibana.k8s.xunlang.home
+
+Run the following in kibana's dev tools:
+
+```
+PUT _index_template/logstash_template
+{
+ "index_patterns": ["logstash-*"],
+ "template": {
+ "settings": {
+ "number_of_replicas": 0
+ }
+ }
+}
+```
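+
+To confirm the template exists afterwards:
+
+```
+GET _index_template/logstash_template
+```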
+
+If fluentd has already started shipping logs, the existing template and index need to be deleted:
+
+```
+DELETE _index_template/logstash_template
+DELETE /logstash-2024.11.09
+
+```
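+
+To list the existing logstash indices first (handy for picking which ones to delete):
+
+```
+GET _cat/indices/logstash-*?v
+```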
diff --git a/efk_cluster/aa.txt b/efk_cluster/aa.txt
new file mode 100644
index 0000000..53bd39d
--- /dev/null
+++ b/efk_cluster/aa.txt
@@ -0,0 +1,16 @@
+<source>
+  @type http
+  @id input_http
+  port 8888
+  tag sardine.log
+</source>
+
+<match sardine.log>
+  @type elasticsearch
+  @id output_elasticsearch
+  host elasticsearch
+  port 9200
+  scheme http
+  user fluentd_user
+  password fluentd_password
+  index_name logstash-sardine-%Y.%m.%d
+</match>
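+
+# To exercise the http input (assuming fluentd's port 8888 is reachable), something like:
+#   curl -X POST -d 'json={"message":"hello"}' http://localhost:8888/sardine.log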
diff --git a/efk_cluster/aa.yaml b/efk_cluster/aa.yaml
new file mode 100644
index 0000000..72a98f5
--- /dev/null
+++ b/efk_cluster/aa.yaml
@@ -0,0 +1,11 @@
+apiVersion: kibana.k8s.elastic.co/v1
+kind: Kibana
+metadata:
+ name: kibana-sample
+spec:
+ version: 8.15.1
+ count: 3
+ elasticsearchRef:
+ name: "elasticsearch-sample"
+ secureSettings:
+ - secretName: kibana-secret-settings
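+# Note: this is an ECK-style manifest; the kibana.k8s.elastic.co/v1 API it uses
+# is only available once the ECK operator is installed.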
diff --git a/efk_cluster/backUpElasticSearch.sh b/efk_cluster/backUpElasticSearch.sh
new file mode 100755
index 0000000..bfa019c
--- /dev/null
+++ b/efk_cluster/backUpElasticSearch.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+# Date format, e.g. 2024-12-05-23
+CURRENT_DATE=$(date "+%Y-%m-%d-%H")
+
+# Get the CLUSTER-IP of the Elasticsearch service
+ELASTICSEARCH_IP=$(microk8s.kubectl get service/elasticsearch -n efk -o json | jq -r '.spec.clusterIP')
+
+# Debug: show the resolved IP address
+echo "Elasticsearch IP: ${ELASTICSEARCH_IP}"
+
+# Exit if no IP address could be resolved
+if [ -z "$ELASTICSEARCH_IP" ]; then
+    echo "Could not determine the Elasticsearch IP address; exiting."
+    exit 1
+fi
+
+# Elasticsearch credentials
+USER="elastic"
+PASSWORD="your_secure_password"
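+
+# Note: the steps below assume a snapshot repository named "my_backup" has already
+# been registered. If it has not, a one-time call along these lines creates a
+# shared-filesystem repository (the location is a placeholder and must be listed
+# under path.repo in elasticsearch.yml):
+#   curl -u ${USER}:${PASSWORD} -X PUT "http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup" \
+#     -H 'Content-Type: application/json' \
+#     -d '{"type": "fs", "settings": {"location": "/mnt/backups"}}'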
+
+# Try to delete this hour's snapshot (in case it already exists)
+DELETE_CURRENT_SNAPSHOT_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${CURRENT_DATE}"
+echo "Trying to delete this hour's snapshot: ${DELETE_CURRENT_SNAPSHOT_URL}"
+
+curl -sf -u ${USER}:${PASSWORD} -X DELETE "${DELETE_CURRENT_SNAPSHOT_URL}"
+
+# Check whether the delete succeeded (-f makes curl return non-zero on HTTP errors)
+if [ $? -eq 0 ]; then
+    echo "Deleted this hour's snapshot: ${CURRENT_DATE}"
+else
+    echo "No snapshot exists for this hour; proceeding to create one."
+fi
+
+# Create this hour's snapshot
+BACKUP_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${CURRENT_DATE}"
+echo "Snapshot request URL: ${BACKUP_URL}"
+
+curl -sf -u ${USER}:${PASSWORD} -X PUT "${BACKUP_URL}" -H 'Content-Type: application/json' -d '{
+  "indices": "*",
+  "ignore_unavailable": true,
+  "include_global_state": false
+}'
+
+# Check whether the snapshot request succeeded
+if [ $? -eq 0 ]; then
+    echo "Snapshot succeeded: ${CURRENT_DATE}"
+
+    # Delete the snapshot from 7 days ago
+    OLD_SNAPSHOT_DATE=$(date --date='7 days ago' "+%Y-%m-%d-%H")
+    DELETE_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${OLD_SNAPSHOT_DATE}"
+
+    # Print the delete URL for debugging
+    echo "Old-snapshot delete URL: ${DELETE_URL}"
+
+    # Delete the old snapshot
+    curl -sf -u ${USER}:${PASSWORD} -X DELETE "${DELETE_URL}"
+    if [ $? -eq 0 ]; then
+        echo "Deleted the snapshot from 7 days ago: ${OLD_SNAPSHOT_DATE}"
+    else
+        echo "Failed to delete the snapshot from 7 days ago: ${OLD_SNAPSHOT_DATE}"
+    fi
+else
+    echo "Snapshot failed: ${CURRENT_DATE}"
+fi
+echo "Current snapshots:"
+curl -s -u ${USER}:${PASSWORD} -X GET "http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/_all" | jq -r '.snapshots[].snapshot'
+
diff --git a/efk_cluster/config/elasticsearch-plugins.example.yml b/efk_cluster/config/elasticsearch-plugins.example.yml
new file mode 100644
index 0000000..b6874e9
--- /dev/null
+++ b/efk_cluster/config/elasticsearch-plugins.example.yml
@@ -0,0 +1,27 @@
+# Rename this file to `elasticsearch-plugins.yml` to use it.
+#
+# All plugins must be listed here. If you add a plugin to this list and run
+# `elasticsearch-plugin sync`, that plugin will be installed. If you remove
+# a plugin from this list, that plugin will be removed when Elasticsearch
+# next starts.
+
+plugins:
+ # Each plugin must have an ID. Plugins with only an ID are official plugins and will be downloaded from Elastic.
+ - id: example-id
+
+ # Plugins can be specified by URL (it doesn't have to be HTTP, you could use e.g. `file:`)
+ - id: example-with-url
+ location: https://some.domain/path/example4.zip
+
+ # Or by maven coordinates:
+ - id: example-with-maven-url
+ location: org.elasticsearch.plugins:example-plugin:1.2.3
+
+ # A proxy can also be configured per-plugin, if necessary
+ - id: example-with-proxy
+ location: https://some.domain/path/example.zip
+ proxy: https://some.domain:1234
+
+# Configures a proxy for all network access. Remove this if you don't need
+# to use a proxy.
+proxy: https://some.domain:1234
diff --git a/efk_cluster/config/elasticsearch.keystore b/efk_cluster/config/elasticsearch.keystore
new file mode 100644
index 0000000..1e248cb
Binary files /dev/null and b/efk_cluster/config/elasticsearch.keystore differ
diff --git a/efk_cluster/config/elasticsearch.yml b/efk_cluster/config/elasticsearch.yml
new file mode 100644
index 0000000..50b1547
--- /dev/null
+++ b/efk_cluster/config/elasticsearch.yml
@@ -0,0 +1,2 @@
+cluster.name: "docker-cluster"
+network.host: 0.0.0.0
diff --git a/efk_cluster/config/jvm.options b/efk_cluster/config/jvm.options
new file mode 100644
index 0000000..9354ef4
--- /dev/null
+++ b/efk_cluster/config/jvm.options
@@ -0,0 +1,75 @@
+################################################################
+##
+## JVM configuration
+##
+################################################################
+##
+## WARNING: DO NOT EDIT THIS FILE. If you want to override the
+## JVM options in this file, or set any additional options, you
+## should create one or more files in the jvm.options.d
+## directory containing your adjustments.
+##
+## See https://www.elastic.co/guide/en/elasticsearch/reference/8.8/jvm-options.html
+## for more information.
+##
+################################################################
+
+
+
+################################################################
+## IMPORTANT: JVM heap size
+################################################################
+##
+## The heap size is automatically configured by Elasticsearch
+## based on the available memory in your system and the roles
+## each node is configured to fulfill. If specifying heap is
+## required, it should be done through a file in jvm.options.d,
+## which should be named with .options suffix, and the min and
+## max should be set to the same value. For example, to set the
+## heap to 4 GB, create a new file in the jvm.options.d
+## directory containing these lines:
+##
+## -Xms4g
+## -Xmx4g
+##
+## See https://www.elastic.co/guide/en/elasticsearch/reference/8.8/heap-size.html
+## for more information
+##
+################################################################
+
+
+################################################################
+## Expert settings
+################################################################
+##
+## All settings below here are considered expert settings. Do
+## not adjust them unless you understand what you are doing. Do
+## not edit them in this file; instead, create a new file in the
+## jvm.options.d directory containing your adjustments.
+##
+################################################################
+
+-XX:+UseG1GC
+
+## JVM temporary directory
+-Djava.io.tmpdir=${ES_TMPDIR}
+
+## heap dumps
+
+# generate a heap dump when an allocation from the Java heap fails; heap dumps
+# are created in the working directory of the JVM unless an alternative path is
+# specified
+-XX:+HeapDumpOnOutOfMemoryError
+
+# exit right after heap dump on out of memory error
+-XX:+ExitOnOutOfMemoryError
+
+# specify an alternative path for heap dumps; ensure the directory exists and
+# has sufficient space
+-XX:HeapDumpPath=data
+
+# specify an alternative path for JVM fatal error logs
+-XX:ErrorFile=logs/hs_err_pid%p.log
+
+## GC logging
+-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,level,pid,tags:filecount=32,filesize=64m
diff --git a/efk_cluster/config/log4j2.file.properties b/efk_cluster/config/log4j2.file.properties
new file mode 100644
index 0000000..5bb63ce
--- /dev/null
+++ b/efk_cluster/config/log4j2.file.properties
@@ -0,0 +1,279 @@
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%consoleException%n
+
+######## Server JSON ############################
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
+appender.rolling.layout.type = ECSJsonLayout
+appender.rolling.layout.dataset = elasticsearch.server
+
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+######## Server - old style pattern ###########
+appender.rolling_old.type = RollingFile
+appender.rolling_old.name = rolling_old
+appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling_old.layout.type = PatternLayout
+appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling_old.policies.type = Policies
+appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling_old.policies.time.interval = 1
+appender.rolling_old.policies.time.modulate = true
+appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling_old.policies.size.size = 128MB
+appender.rolling_old.strategy.type = DefaultRolloverStrategy
+appender.rolling_old.strategy.fileIndex = nomax
+appender.rolling_old.strategy.action.type = Delete
+appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling_old.strategy.action.condition.type = IfFileName
+appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+rootLogger.appenderRef.rolling_old.ref = rolling_old
+
+######## Deprecation JSON #######################
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
+appender.deprecation_rolling.layout.type = ECSJsonLayout
+# Intentionally follows a different pattern to above
+appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
+appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
+
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+appender.header_warning.type = HeaderWarningAppender
+appender.header_warning.name = header_warning
+#################################################
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = WARN
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.appenderRef.header_warning.ref = header_warning
+logger.deprecation.additivity = false
+
+######## Search slowlog JSON ####################
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+ .cluster_name}_index_search_slowlog.json
+appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
+appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog
+
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+ .cluster_name}_index_search_slowlog-%i.json.gz
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.size.size = 1GB
+appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
+appender.index_search_slowlog_rolling.strategy.max = 4
+#################################################
+
+#################################################
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+######## Indexing slowlog JSON ##################
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+ _index_indexing_slowlog.json
+appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
+appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog
+
+
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+ _index_indexing_slowlog-%i.json.gz
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
+appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
+appender.index_indexing_slowlog_rolling.strategy.max = 4
+#################################################
+
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
+
+
+logger.org_apache_pdfbox.name = org.apache.pdfbox
+logger.org_apache_pdfbox.level = off
+
+logger.org_apache_poi.name = org.apache.poi
+logger.org_apache_poi.level = off
+
+logger.org_apache_fontbox.name = org.apache.fontbox
+logger.org_apache_fontbox.level = off
+
+logger.org_apache_xmlbeans.name = org.apache.xmlbeans
+logger.org_apache_xmlbeans.level = off
+
+
+logger.com_amazonaws.name = com.amazonaws
+logger.com_amazonaws.level = warn
+
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error
+
+logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
+logger.com_amazonaws_metrics_AwsSdkMetrics.level = error
+
+logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader
+logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error
+
+logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver
+logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error
+
+
+appender.audit_rolling.type = RollingFile
+appender.audit_rolling.name = audit_rolling
+appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json
+appender.audit_rolling.layout.type = PatternLayout
+appender.audit_rolling.layout.pattern = {\
+ "type":"audit", \
+ "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
+ %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\
+ %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\
+ %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
+ %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
+ %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
+ %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
+ %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
+ %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
+ %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
+ %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
+ %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "user.roles":%map{user.roles}}\
+ %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
+ %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
+ %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
+ %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
+ %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\
+ %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
+ %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
+ %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
+ %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
+ %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
+ %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
+ %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
+ %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
+ %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
+ %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
+ %varsNotEmpty{, "indices":%map{indices}}\
+ %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
+ %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
+ %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
+ %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
+ %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
+ %varsNotEmpty{, "put":%map{put}}\
+ %varsNotEmpty{, "delete":%map{delete}}\
+ %varsNotEmpty{, "change":%map{change}}\
+ %varsNotEmpty{, "create":%map{create}}\
+ %varsNotEmpty{, "invalidate":%map{invalidate}}\
+ }%n
+# "node.name" node name from the `elasticsearch.yml` settings
+# "node.id" node id which should not change between cluster restarts
+# "host.name" unresolved hostname of the local node
+# "host.ip" the local bound ip (i.e. the ip listening for connections)
+# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
+# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
+# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
+# "user.name" the subject name as authenticated by a realm
+# "user.run_by.name" the original authenticated subject name that is impersonating another one.
+# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
+# "user.realm" the name of the realm that authenticated "user.name"
+# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain
+# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
+# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain
+# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
+# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain
+# "user.roles" the roles array of the user; these are the roles that are granting privileges
+# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
+# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
+# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
+# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
+# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster
+# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
+# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
+# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
+# "realm_domain" if "realm" is under a domain, this is the name of the domain
+# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
+# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
+# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
+# "request.body" the content of the request body entity, JSON escaped
+# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
+# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
+# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
+# "indices" the array of indices that the "action" is acting upon
+# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
+# "trace_id" an identifier conveyed by the part of "traceparent" request header
+# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
+# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
+# "rule" name of the applied rule if the "origin.type" is "ip_filter"
+# the "put", "delete", "change", "create", "invalidate" fields are only present
+# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect
+
+appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}-%i.json.gz
+appender.audit_rolling.policies.type = Policies
+appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.audit_rolling.policies.time.interval = 1
+appender.audit_rolling.policies.time.modulate = true
+appender.audit_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.audit_rolling.policies.size.size = 1GB
+appender.audit_rolling.strategy.type = DefaultRolloverStrategy
+appender.audit_rolling.strategy.fileIndex = nomax
+
+logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
+logger.xpack_security_audit_logfile.level = info
+logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
+logger.xpack_security_audit_logfile.additivity = false
+
+logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
+logger.xmlsig.level = error
+logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
+logger.samlxml_decrypt.level = fatal
+logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
+logger.saml2_decrypt.level = fatal
diff --git a/efk_cluster/config/log4j2.properties b/efk_cluster/config/log4j2.properties
new file mode 100644
index 0000000..c0d67c8
--- /dev/null
+++ b/efk_cluster/config/log4j2.properties
@@ -0,0 +1,193 @@
+status = error
+
+######## Server JSON ############################
+appender.rolling.type = Console
+appender.rolling.name = rolling
+appender.rolling.layout.type = ECSJsonLayout
+appender.rolling.layout.dataset = elasticsearch.server
+
+################################################
+
+################################################
+
+rootLogger.level = info
+rootLogger.appenderRef.rolling.ref = rolling
+
+######## Deprecation JSON #######################
+appender.deprecation_rolling.type = Console
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.layout.type = ECSJsonLayout
+# Intentionally follows a different pattern to above
+appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
+appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
+
+appender.header_warning.type = HeaderWarningAppender
+appender.header_warning.name = header_warning
+#################################################
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = WARN
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.appenderRef.header_warning.ref = header_warning
+logger.deprecation.additivity = false
+
+######## Search slowlog JSON ####################
+appender.index_search_slowlog_rolling.type = Console
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
+appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog
+
+#################################################
+
+#################################################
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+######## Indexing slowlog JSON ##################
+appender.index_indexing_slowlog_rolling.type = Console
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
+appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog
+
+#################################################
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
+
+logger.org_apache_pdfbox.name = org.apache.pdfbox
+logger.org_apache_pdfbox.level = off
+
+logger.org_apache_poi.name = org.apache.poi
+logger.org_apache_poi.level = off
+
+logger.org_apache_fontbox.name = org.apache.fontbox
+logger.org_apache_fontbox.level = off
+
+logger.org_apache_xmlbeans.name = org.apache.xmlbeans
+logger.org_apache_xmlbeans.level = off
+
+logger.com_amazonaws.name = com.amazonaws
+logger.com_amazonaws.level = warn
+
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error
+
+logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
+logger.com_amazonaws_metrics_AwsSdkMetrics.level = error
+
+logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader
+logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error
+
+logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver
+logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error
+
+appender.audit_rolling.type = Console
+appender.audit_rolling.name = audit_rolling
+appender.audit_rolling.layout.type = PatternLayout
+appender.audit_rolling.layout.pattern = {\
+ "type":"audit", \
+ "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
+ %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\
+ %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\
+ %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
+ %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
+ %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
+ %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
+ %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
+ %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
+ %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
+ %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
+ %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "user.roles":%map{user.roles}}\
+ %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
+ %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
+ %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
+ %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
+ %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\
+ %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
+ %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
+ %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
+ %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\
+ %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
+ %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
+ %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
+ %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
+ %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
+ %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
+ %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
+ %varsNotEmpty{, "indices":%map{indices}}\
+ %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
+ %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
+ %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
+ %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
+ %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
+ %varsNotEmpty{, "put":%map{put}}\
+ %varsNotEmpty{, "delete":%map{delete}}\
+ %varsNotEmpty{, "change":%map{change}}\
+ %varsNotEmpty{, "create":%map{create}}\
+ %varsNotEmpty{, "invalidate":%map{invalidate}}\
+ }%n
+# "node.name" node name from the `elasticsearch.yml` settings
+# "node.id" node id which should not change between cluster restarts
+# "host.name" unresolved hostname of the local node
+# "host.ip" the local bound ip (i.e. the ip listening for connections)
+# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
+# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
+# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
+# "user.name" the subject name as authenticated by a realm
+# "user.run_by.name" the original authenticated subject name that is impersonating another one.
+# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
+# "user.realm" the name of the realm that authenticated "user.name"
+# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain
+# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
+# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain
+# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
+# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain
+# "user.roles" the roles array of the user; these are the roles that are granting privileges
+# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
+# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
+# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
+# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
+# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster
+# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
+# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
+# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
+# "realm_domain" if "realm" is under a domain, this is the name of the domain
+# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
+# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
+# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
+# "request.body" the content of the request body entity, JSON escaped
+# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
+# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
+# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
+# "indices" the array of indices that the "action" is acting upon
+# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
+# "trace_id" an identifier conveyed by the part of "traceparent" request header
+# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
+# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
+# "rule" name of the applied rule if the "origin.type" is "ip_filter"
+# the "put", "delete", "change", "create", "invalidate" fields are only present
+# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect
+
+logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
+logger.xpack_security_audit_logfile.level = info
+logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
+logger.xpack_security_audit_logfile.additivity = false
+
+logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
+logger.xmlsig.level = error
+logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
+logger.samlxml_decrypt.level = fatal
+logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
+logger.saml2_decrypt.level = fatal
\ No newline at end of file
diff --git a/efk_cluster/config/role_mapping.yml b/efk_cluster/config/role_mapping.yml
new file mode 100644
index 0000000..68c82f7
--- /dev/null
+++ b/efk_cluster/config/role_mapping.yml
@@ -0,0 +1,14 @@
+# Role mapping configuration file which has elasticsearch roles as keys
+# that map to one or more user or group distinguished names
+
+#roleA: this is an elasticsearch role
+# - groupA-DN this is a group distinguished name
+# - groupB-DN
+# - user1-DN this is the full user distinguished name
+
+#power_user:
+# - "cn=admins,dc=example,dc=com"
+#user:
+# - "cn=users,dc=example,dc=com"
+# - "cn=admins,dc=example,dc=com"
+# - "cn=John Doe,cn=other users,dc=example,dc=com"
diff --git a/efk_cluster/config/roles.yml b/efk_cluster/config/roles.yml
new file mode 100644
index 0000000..68e003b
--- /dev/null
+++ b/efk_cluster/config/roles.yml
@@ -0,0 +1,3 @@
+# The default roles file is empty as the preferred method of defining roles is
+# through the API/UI. File based roles are useful in error scenarios when the
+# API based roles may not be available.
diff --git a/efk_cluster/config/service_tokens b/efk_cluster/config/service_tokens
new file mode 100644
index 0000000..3c74311
--- /dev/null
+++ b/efk_cluster/config/service_tokens
@@ -0,0 +1 @@
+elastic/kibana/my-token:{PBKDF2_STRETCH}10000$hqtLfPNEvoHIGGMhgpcrdTNCaXBqHGIOAj7ndDmt8w8=$073Kw/8neGbcNJQAi37DhyEKiIvYIM4MkzvuflndCbg=
diff --git a/efk_cluster/config/users b/efk_cluster/config/users
new file mode 100644
index 0000000..e69de29
diff --git a/efk_cluster/config/users_roles b/efk_cluster/config/users_roles
new file mode 100644
index 0000000..e69de29
diff --git a/efk_cluster/createFluentdAccoutnIn.sh b/efk_cluster/createFluentdAccoutnIn.sh
new file mode 100755
index 0000000..abad64e
--- /dev/null
+++ b/efk_cluster/createFluentdAccoutnIn.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+ELASTIC_PASSWORD="your_secure_password"
+ELASTIC_HOST=$(microk8s.kubectl get svc elasticsearch -n efk -o wide | awk 'NR==2 {print $3}')
+echo $ELASTIC_HOST
+
+curl -X PUT "http://${ELASTIC_HOST}:9200/_security/role/fluentd_writer" \
+-u elastic:$ELASTIC_PASSWORD \
+-H "Content-Type: application/json" \
+-d '{
+ "cluster": ["monitor"],
+ "indices": [
+ {
+ "names": ["logstash-*"],
+ "privileges": ["write", "create_index"]
+ }
+ ]
+ }'
+echo
+curl -X PUT "http://$ELASTIC_HOST:9200/_security/user/fluentd_user" \
+-u elastic:$ELASTIC_PASSWORD \
+-H "Content-Type: application/json" \
+-d '{
+ "password": "fluentd_password",
+ "roles": ["fluentd_writer"]
+ }'
+
+
+echo
+curl -X GET "http://$ELASTIC_HOST:9200/_security/user/fluentd_user" \
+-u elastic:$ELASTIC_PASSWORD
+
+echo
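+# Note: fluentd_user / fluentd_password above are the credentials the fluentd
+# elasticsearch output (its user/password fields) is configured with.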
diff --git a/efk_cluster/createSecure_passwd_forES.sh b/efk_cluster/createSecure_passwd_forES.sh
new file mode 100755
index 0000000..cbd9022
--- /dev/null
+++ b/efk_cluster/createSecure_passwd_forES.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+microk8s kubectl create secret generic elasticsearch-secret -n efk \
+ --from-literal=elastic_password='your_secure_password'
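+
+# Quick check (decodes the stored password back out of the secret):
+# microk8s kubectl get secret elasticsearch-secret -n efk -o jsonpath='{.data.elastic_password}' | base64 -d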
diff --git a/efk_cluster/efk-namespace.yaml b/efk_cluster/efk-namespace.yaml
new file mode 100644
index 0000000..2bdba40
--- /dev/null
+++ b/efk_cluster/efk-namespace.yaml
@@ -0,0 +1,5 @@
+# efk-namespace.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: efk
diff --git a/efk_cluster/elasticsearch-cluster.yaml b/efk_cluster/elasticsearch-cluster.yaml
new file mode 100644
index 0000000..4a09cd0
--- /dev/null
+++ b/efk_cluster/elasticsearch-cluster.yaml
@@ -0,0 +1,342 @@
+# PVC definitions (these already exist; no need to create them again)
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-0
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 300Gi
+ volumeName: elasticsearch-hot-pv
+ storageClassName: ""
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-1
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 128Gi
+ volumeName: elasticsearch-warm-pv
+ storageClassName: ""
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-2
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Gi
+ volumeName: elasticsearch-cold-pv
+ storageClassName: ""
+
+---
+# StatefulSet for elasticsearch-0 (hot)
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: elasticsearch-0
+ namespace: efk
+spec:
+ serviceName: elasticsearch-service
+ replicas: 1
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ initContainers:
+ - name: init-sysctl
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ privileged: true
+ - name: init-dirs
+ image: busybox
+ command: ["sh", "-c", "mkdir -p /data/data /data/config"]
+ securityContext:
+          runAsUser: 0 # run as root
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ securityContext:
+          runAsUser: 0 # run as root
+ ports:
+ - containerPort: 9200
+ name: http
+ - containerPort: 9300
+ name: transport
+ env:
+ - name: cluster.name
+ value: "my-es-cluster"
+ - name: node.name
+ value: "elasticsearch-0"
+ - name: discovery.seed_hosts
+ value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
+ - name: cluster.initial_master_nodes
+ value: "elasticsearch-0"
+ - name: xpack.security.enabled
+ value: "true"
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-secret
+ key: elastic_password
+ - name: ES_JAVA_OPTS
+ value: "-Xms2g -Xmx4g"
+ - name: node.roles
+ value: "[master, data]"
+ - name: node.attr.data-tier
+ value: "hot"
+ resources:
+ requests:
+ memory: "4Gi"
+ cpu: "1"
+ limits:
+ memory: "6Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ subPath: data
+ - name: data
+ mountPath: /usr/share/elasticsearch/config
+ subPath: config
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values: ["elasticsearch"]
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: "elasticsearch-data-pvc-elasticsearch-0"
+---
+# StatefulSet for elasticsearch-1 (warm)
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: elasticsearch-1
+ namespace: efk
+spec:
+ serviceName: elasticsearch-service
+ replicas: 1
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ initContainers:
+ - name: init-sysctl
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ privileged: true
+ - name: init-dirs
+ image: busybox
+ command: ["sh", "-c", "mkdir -p /data/data /data/config"]
+ securityContext:
+          runAsUser: 0 # run as root
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ securityContext:
+          runAsUser: 0 # run as root
+ ports:
+ - containerPort: 9200
+ name: http
+ - containerPort: 9300
+ name: transport
+ env:
+ - name: cluster.name
+ value: "my-es-cluster"
+ - name: node.name
+ value: "elasticsearch-1"
+ - name: discovery.seed_hosts
+ value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
+ - name: cluster.initial_master_nodes
+ value: "elasticsearch-0"
+ - name: xpack.security.enabled
+ value: "true"
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-secret
+ key: elastic_password
+ - name: ES_JAVA_OPTS
+ value: "-Xms2g -Xmx4g"
+ - name: node.roles
+ value: "[data]"
+ - name: node.attr.data-tier
+ value: "warm"
+ resources:
+ requests:
+ memory: "4Gi"
+ cpu: "1"
+ limits:
+ memory: "6Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ subPath: data
+ - name: data
+ mountPath: /usr/share/elasticsearch/config
+ subPath: config
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values: ["elasticsearch"]
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: "elasticsearch-data-pvc-elasticsearch-1"
+---
+# StatefulSet for elasticsearch-2 (cold)
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: elasticsearch-2
+ namespace: efk
+spec:
+ serviceName: elasticsearch-service
+ replicas: 1
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ initContainers:
+ - name: init-sysctl
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ privileged: true
+ - name: init-dirs
+ image: busybox
+ command: ["sh", "-c", "mkdir -p /data/data /data/config"]
+ securityContext:
+          runAsUser: 0 # run as root
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ securityContext:
+          runAsUser: 0 # run as root
+ ports:
+ - containerPort: 9200
+ name: http
+ - containerPort: 9300
+ name: transport
+ env:
+ - name: cluster.name
+ value: "my-es-cluster"
+ - name: node.name
+ value: "elasticsearch-2"
+ - name: discovery.seed_hosts
+ value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
+ - name: cluster.initial_master_nodes
+ value: "elasticsearch-0"
+ - name: xpack.security.enabled
+ value: "true"
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-secret
+ key: elastic_password
+ - name: ES_JAVA_OPTS
+ value: "-Xms2g -Xmx4g"
+ - name: node.roles
+ value: "[data]"
+ - name: node.attr.data-tier
+ value: "cold"
+ resources:
+ requests:
+ memory: "4Gi"
+ cpu: "1"
+ limits:
+ memory: "6Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ subPath: data
+ - name: data
+ mountPath: /usr/share/elasticsearch/config
+ subPath: config
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values: ["elasticsearch"]
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: "elasticsearch-data-pvc-elasticsearch-2"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: elasticsearch-service
+ namespace: efk
+spec:
+ clusterIP: None
+ selector:
+ app: elasticsearch
+ ports:
+ - port: 9200
+ name: http
+ - port: 9300
+ name: transport
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: elasticsearch-secret
+ namespace: efk
+type: Opaque
+data:
+  elastic_password: bXluZXdwYXNzd29yZA== # example password "mynewpassword"
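+# The node.attr.data-tier attribute set on each StatefulSet above is a custom
+# node attribute; indices can be pinned to a tier with allocation filtering,
+# e.g. (a sketch, run via the ES API):
+#   PUT logstash-*/_settings
+#   { "index.routing.allocation.require.data-tier": "hot" }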
diff --git a/efk_cluster/elasticsearch-configMap.yaml b/efk_cluster/elasticsearch-configMap.yaml
new file mode 100644
index 0000000..19645e1
--- /dev/null
+++ b/efk_cluster/elasticsearch-configMap.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: es-config
+ namespace: efk
+data:
+ node.roles.elasticsearch-0: "[master, data]"
+ node.roles.elasticsearch-1: "[data]"
+ node.roles.elasticsearch-2: "[data]"
+ node.attr.data-tier.elasticsearch-0: "hot"
+ node.attr.data-tier.elasticsearch-1: "warm"
+ node.attr.data-tier.elasticsearch-2: "cold"
+
diff --git a/efk_cluster/elasticsearch-deployment.yaml b/efk_cluster/elasticsearch-deployment.yaml
new file mode 100644
index 0000000..23271b6
--- /dev/null
+++ b/efk_cluster/elasticsearch-deployment.yaml
@@ -0,0 +1,55 @@
+# elasticsearch-deployment-8.8.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: elasticsearch
+ namespace: efk
+ labels:
+ app: elasticsearch
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ ports:
+ - containerPort: 9200
+ - containerPort: 9300
+ env:
+ - name: discovery.type
+ value: "single-node"
+ - name: xpack.security.enabled
+ value: "true"
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-secret
+ key: elastic_password
+ - name: ES_JAVA_OPTS
+          value: "-Xms2g -Xmx4g" # JVM heap: 2 GB min, 4 GB max
+        resources: # resource requests and limits
+ requests:
+ memory: "4Gi"
+ cpu: "1"
+ limits:
+ memory: "6Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: data-volume
+ mountPath: /usr/share/elasticsearch/data
+ - name: config-volume
+ mountPath: /usr/share/elasticsearch/config
+ volumes:
+ - name: data-volume
+ persistentVolumeClaim:
+ claimName: elasticsearch-data-pvc
+ - name: config-volume
+ persistentVolumeClaim:
+ claimName: elasticsearch-config-pvc
diff --git a/efk_cluster/elasticsearch-ingress.yaml b/efk_cluster/elasticsearch-ingress.yaml
new file mode 100644
index 0000000..a38e1fd
--- /dev/null
+++ b/efk_cluster/elasticsearch-ingress.yaml
@@ -0,0 +1,18 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: elasticsearch
+ namespace: efk
+spec:
+ ingressClassName: traefik
+ rules:
+ - host: elastic.k8s.xunlang.home
+ http:
+ paths:
+ - backend:
+ service:
+            name: elasticsearch # points at the new Service
+ port:
+ number: 9200
+ path: /
+ pathType: Prefix
diff --git a/efk_cluster/elasticsearch-pv.yaml b/efk_cluster/elasticsearch-pv.yaml
new file mode 100644
index 0000000..c1dbfe9
--- /dev/null
+++ b/efk_cluster/elasticsearch-pv.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: elasticsearch-data-pv
+spec:
+ capacity:
+ storage: 300Gi
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: microk8s-hostpath
+ hostPath:
+ path: /var/snap/microk8s/common/mnt/data/elasticsearch-data
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: elasticsearch-config-pv
+spec:
+ capacity:
+ storage: 10Mi
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: microk8s-hostpath
+ hostPath:
+ path: /var/snap/microk8s/common/mnt/data/elasticsearch-config
diff --git a/efk_cluster/elasticsearch-pvandpvc_incluster.yaml b/efk_cluster/elasticsearch-pvandpvc_incluster.yaml
new file mode 100644
index 0000000..5902d03
--- /dev/null
+++ b/efk_cluster/elasticsearch-pvandpvc_incluster.yaml
@@ -0,0 +1,91 @@
+# PV for the hot node
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: elasticsearch-hot-pv
+spec:
+ capacity:
+ storage: 300Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ server: 10.71.142.1 # fenny
+ path: /root/elasticsearch/hotData
+
+---
+# PV for the warm node
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: elasticsearch-warm-pv
+spec:
+ capacity:
+ storage: 128Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ server: 10.71.142.1 # fenny
+ path: /root/elasticsearch/warmData
+
+---
+# PV for the cold node
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: elasticsearch-cold-pv
+spec:
+ capacity:
+ storage: 500Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ server: 10.71.142.1 # fenny
+ path: /root/elasticsearch/coldData
+
+---
+# PVC for Hot
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-0
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 300Gi
+ volumeName: elasticsearch-hot-pv
+  storageClassName: "" # explicitly disable the StorageClass
+
+---
+# PVC for Warm
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-1
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 128Gi
+ volumeName: elasticsearch-warm-pv
+  storageClassName: "" # explicitly disable the StorageClass
+
+---
+# PVC for Cold
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-2
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Gi
+ volumeName: elasticsearch-cold-pv
+  storageClassName: "" # explicitly disable the StorageClass
diff --git a/efk_cluster/elasticsearch-pvc.yaml b/efk_cluster/elasticsearch-pvc.yaml
new file mode 100644
index 0000000..adb203f
--- /dev/null
+++ b/efk_cluster/elasticsearch-pvc.yaml
@@ -0,0 +1,43 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-0
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 300Gi
+ volumeName: elasticsearch-hot-pv
+ storageClassName: "" # 禁用默认 StorageClass
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-1
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 128Gi
+ volumeName: elasticsearch-warm-pv
+ storageClassName: ""
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: elasticsearch-data-pvc-elasticsearch-2
+ namespace: efk
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 500Gi
+ volumeName: elasticsearch-cold-pv
+ storageClassName: ""
diff --git a/efk_cluster/elasticsearch-secret.yaml b/efk_cluster/elasticsearch-secret.yaml
new file mode 100644
index 0000000..a7eac0e
--- /dev/null
+++ b/efk_cluster/elasticsearch-secret.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: elasticsearch-secret
+ namespace: efk
+type: Opaque
+data:
+  elastic_password: eW91cl9zZWN1cmVfcGFzc3dvcmQ= # Base64-encoded "your_secure_password"
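+  # The value above can be regenerated when the password changes -- a minimal
+  # sketch using standard coreutils:
+  #   echo -n 'your_secure_password' | base64
+  #   # -> eW91cl9zZWN1cmVfcGFzc3dvcmQ=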
diff --git a/efk_cluster/elasticsearch-service.yaml b/efk_cluster/elasticsearch-service.yaml
new file mode 100644
index 0000000..7375131
--- /dev/null
+++ b/efk_cluster/elasticsearch-service.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: elasticsearch
+ namespace: efk
+spec:
+ ports:
+ - port: 9200
+ targetPort: 9200
+ name: http
+ selector:
+    statefulset.kubernetes.io/pod-name: elasticsearch-0-0 # select only the hot node; pods of StatefulSet "elasticsearch-0" are named elasticsearch-0-0
+  type: LoadBalancer # keep the LoadBalancer type
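+# With type LoadBalancer (e.g. MetalLB via the microk8s metallb addon), the
+# assigned external IP can be read back -- a hedged check:
+#   microk8s.kubectl get svc elasticsearch -n efk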
diff --git a/efk_cluster/elasticsearch-statefulSet.yaml b/efk_cluster/elasticsearch-statefulSet.yaml
new file mode 100644
index 0000000..3b75178
--- /dev/null
+++ b/efk_cluster/elasticsearch-statefulSet.yaml
@@ -0,0 +1,269 @@
+# elasticsearch-0
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: elasticsearch-0
+ namespace: efk
+spec:
+ serviceName: elasticsearch-service
+ replicas: 1
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ initContainers:
+ - name: init-sysctl
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ privileged: true
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ ports:
+ - containerPort: 9200
+ name: http
+ - containerPort: 9300
+ name: transport
+ env:
+ - name: cluster.name
+ value: "my-es-cluster"
+ - name: node.name
+ value: "elasticsearch-0"
+        - name: discovery.seed_hosts
+          # StatefulSet pod DNS is <pod-name>.<serviceName>...; the pods of StatefulSets elasticsearch-0/1/2 are named elasticsearch-0-0, elasticsearch-1-0, elasticsearch-2-0
+          value: "elasticsearch-0-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2-0.elasticsearch-service.efk.svc.cluster.local"
+ - name: cluster.initial_master_nodes
+ value: "elasticsearch-0"
+ - name: xpack.security.enabled
+ value: "true"
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-secret
+ key: elastic_password
+ - name: ES_JAVA_OPTS
+ value: "-Xms2g -Xmx4g"
+ - name: node.roles
+ value: "[master, data]"
+ - name: node.attr.data-tier
+ value: "hot"
+ resources:
+ requests:
+ memory: "4Gi"
+ cpu: "1"
+ limits:
+ memory: "6Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ - name: config-volume
+ mountPath: /usr/share/elasticsearch/config
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values: ["elasticsearch"]
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: "elasticsearch-data-pvc-elasticsearch-0"
+ - name: config-volume
+ persistentVolumeClaim:
+ claimName: "elasticsearch-config-pvc"
+---
+# elasticsearch-1
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: elasticsearch-1
+ namespace: efk
+spec:
+ serviceName: elasticsearch-service
+ replicas: 1
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ initContainers:
+ - name: init-sysctl
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ privileged: true
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ ports:
+ - containerPort: 9200
+ name: http
+ - containerPort: 9300
+ name: transport
+ env:
+ - name: cluster.name
+ value: "my-es-cluster"
+ - name: node.name
+ value: "elasticsearch-1"
+        - name: discovery.seed_hosts
+          # pod DNS names follow <statefulset>-<ordinal>; see elasticsearch-0 above
+          value: "elasticsearch-0-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2-0.elasticsearch-service.efk.svc.cluster.local"
+ - name: cluster.initial_master_nodes
+ value: "elasticsearch-0"
+ - name: xpack.security.enabled
+ value: "true"
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-secret
+ key: elastic_password
+ - name: ES_JAVA_OPTS
+ value: "-Xms2g -Xmx4g"
+ - name: node.roles
+ value: "[data]"
+ - name: node.attr.data-tier
+ value: "warm"
+ resources:
+ requests:
+ memory: "4Gi"
+ cpu: "1"
+ limits:
+ memory: "6Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ - name: config-volume
+ mountPath: /usr/share/elasticsearch/config
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values: ["elasticsearch"]
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: "elasticsearch-data-pvc-elasticsearch-1"
+ - name: config-volume
+ persistentVolumeClaim:
+ claimName: "elasticsearch-config-pvc"
+---
+# elasticsearch-2
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: elasticsearch-2
+ namespace: efk
+spec:
+ serviceName: elasticsearch-service
+ replicas: 1
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ initContainers:
+ - name: init-sysctl
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ privileged: true
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+ ports:
+ - containerPort: 9200
+ name: http
+ - containerPort: 9300
+ name: transport
+ env:
+ - name: cluster.name
+ value: "my-es-cluster"
+ - name: node.name
+ value: "elasticsearch-2"
+        - name: discovery.seed_hosts
+          # pod DNS names follow <statefulset>-<ordinal>; see elasticsearch-0 above
+          value: "elasticsearch-0-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2-0.elasticsearch-service.efk.svc.cluster.local"
+ - name: cluster.initial_master_nodes
+ value: "elasticsearch-0"
+ - name: xpack.security.enabled
+ value: "true"
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-secret
+ key: elastic_password
+ - name: ES_JAVA_OPTS
+ value: "-Xms2g -Xmx4g"
+ - name: node.roles
+ value: "[data]"
+ - name: node.attr.data-tier
+ value: "cold"
+ resources:
+ requests:
+ memory: "4Gi"
+ cpu: "1"
+ limits:
+ memory: "6Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ - name: config-volume
+ mountPath: /usr/share/elasticsearch/config
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values: ["elasticsearch"]
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: "elasticsearch-data-pvc-elasticsearch-2"
+ - name: config-volume
+ persistentVolumeClaim:
+ claimName: "elasticsearch-config-pvc"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: elasticsearch-service
+ namespace: efk
+spec:
+ clusterIP: None
+ selector:
+ app: elasticsearch
+ ports:
+ - port: 9200
+ name: http
+ - port: 9300
+ name: transport
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: elasticsearch-secret
+ namespace: efk
+type: Opaque
+data:
+  elastic_password: eW91cl9zZWN1cmVfcGFzc3dvcmQ= # Base64-encoded "your_secure_password"
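+# The node.attr.data-tier values above (hot/warm/cold) are custom node
+# attributes; indices only land on a tier when shard allocation filtering
+# requests it. A hedged Kibana Dev Tools sketch (index name is hypothetical)
+# that moves an index onto the warm node:
+#
+#   PUT /logstash-sardine-2025.01.01/_settings
+#   {
+#     "index.routing.allocation.require.data-tier": "warm"
+#   }
+#
+# Note: the Secret above duplicates efk_cluster/elasticsearch-secret.yaml.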
diff --git a/efk_cluster/fileBeat-config.yaml b/efk_cluster/fileBeat-config.yaml
new file mode 100644
index 0000000..600eee9
--- /dev/null
+++ b/efk_cluster/fileBeat-config.yaml
@@ -0,0 +1,62 @@
+# filebeat-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: filebeat-config
+ namespace: efk
+data:
+ filebeat.yml: |
+ filebeat.inputs:
+ - type: http_endpoint
+ enabled: true
+ listen_address: 0.0.0.0:8888
+ path: "/"
+ json.keys_under_root: true
+
+ processors:
+      # Extract variables from the request path (strict mode)
+ - dissect:
+ tokenizer: "/tanya.candle.%{currency}.%{year}.%{interval}"
+ field: "http.request.path"
+ target_prefix: ""
+          ignore_missing: false # key: fail instead of silently skipping missing fields
+
+      # Force default values (even when a field is empty)
+      - script:
+          lang: javascript
+          source: |
+            function process(event) {
+              // check each field first; set a default when it is missing or empty
+              // (the Beats script processor event API uses capitalized Get/Put)
+              var currency = event.Get('currency');
+              if (currency === null || currency === '') {
+                event.Put('currency', 'unknown');
+              }
+              var year = event.Get('year');
+              if (year === null || year === '') {
+                event.Put('year', '0000');
+              }
+              var interval = event.Get('interval');
+              if (interval === null || interval === '') {
+                event.Put('interval', '0D');
+              }
+            }
+
+ output.elasticsearch:
+ hosts: ["http://elasticsearch:9200"]
+ username: "fluentd_user"
+ password: "fluentd_password"
+ indices:
+ - index: "logstash-candle-${currency}-${year}-${interval}"
+ # 严格验证字段值非空
+ when.and:
+ - not.equals:
+ currency: ""
+ - not.equals:
+ year: ""
+ - not.equals:
+ interval: ""
+ - index: "fallback-index"
+ when.or:
+ - equals:
+ currency: ""
+ - equals:
+ year: ""
+ - equals:
+ interval: ""
diff --git a/efk_cluster/fileBeat-daemonset.yaml b/efk_cluster/fileBeat-daemonset.yaml
new file mode 100644
index 0000000..1a4ea40
--- /dev/null
+++ b/efk_cluster/fileBeat-daemonset.yaml
@@ -0,0 +1,38 @@
+# filebeat-daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: filebeat
+ namespace: efk
+spec:
+ selector:
+ matchLabels:
+ app: filebeat
+ template:
+ metadata:
+ labels:
+ app: filebeat
+ spec:
+ containers:
+ - name: filebeat
+ image: docker.elastic.co/beats/filebeat:8.12.0
+ args: [
+ "-c", "/etc/filebeat.yml",
+ "-e",
+ "-strict.perms=false"
+ ]
+ volumeMounts:
+ - name: config
+ mountPath: /etc/filebeat.yml
+ readOnly: true
+ subPath: filebeat.yml
+ - name: varlog
+ mountPath: /var/log
+ readOnly: true
+ volumes:
+ - name: config
+ configMap:
+ name: filebeat-config
+ - name: varlog
+ hostPath:
+ path: /var/log
diff --git a/efk_cluster/fileBeat-ingress.yaml b/efk_cluster/fileBeat-ingress.yaml
new file mode 100644
index 0000000..fa81654
--- /dev/null
+++ b/efk_cluster/fileBeat-ingress.yaml
@@ -0,0 +1,22 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: filebeat-ingress
+ namespace: efk
+ annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: / # note: nginx annotations are ignored by the traefik ingress class used below
+    # Uncomment for SSL termination if TLS/SSL support is needed:
+    # nginx.ingress.kubernetes.io/ssl-redirect: "true"
+spec:
+ ingressClassName: traefik
+ rules:
+ - host: filebeat.k8s.xunlang.home
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: filebeat-service
+ port:
+ number: 8888
diff --git a/efk_cluster/fileBeat-service.yaml b/efk_cluster/fileBeat-service.yaml
new file mode 100644
index 0000000..ca32ead
--- /dev/null
+++ b/efk_cluster/fileBeat-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: filebeat-service
+ namespace: efk
+spec:
+ type: LoadBalancer
+ selector:
+    app: filebeat # must match the DaemonSet pod label "app: filebeat"
+ ports:
+ - protocol: TCP
+      port: 8888 # port exposed by the Service
+      targetPort: 8888 # port inside the Filebeat container
+
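+# After applying, a hedged check that the selector matches the DaemonSet pods
+# (an empty ENDPOINTS column would indicate a label mismatch):
+#   microk8s.kubectl get endpoints filebeat-service -n efk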
diff --git a/efk_cluster/fluentd-configMap.yaml b/efk_cluster/fluentd-configMap.yaml
new file mode 100644
index 0000000..0226110
--- /dev/null
+++ b/efk_cluster/fluentd-configMap.yaml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: fluentd-config
+ namespace: efk
+data:
+  fluent.conf: |
+    <source>
+      @type http
+      @id input_http
+      port 8888
+      tag sardine.log
+      @label @main
+    </source>
+
+    <label @main>
+      <match **>
+        @type stdout
+        @id output_stdout_all
+      </match>
+    </label>
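+# A hedged smoke test for the in_http source: fluentd's HTTP input takes the
+# event tag from the request path, so this event should appear on stdout
+# (<pod-ip> is a placeholder):
+#   curl -X POST -d 'json={"message":"hello"}' "http://<pod-ip>:8888/sardine.log"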
diff --git a/efk_cluster/fluentd-configMap2.yaml b/efk_cluster/fluentd-configMap2.yaml
new file mode 100644
index 0000000..e698764
--- /dev/null
+++ b/efk_cluster/fluentd-configMap2.yaml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: fluentd-config
+ namespace: efk
+data:
+  fluent.conf: |
+    <source>
+      @type http
+      @id input_http
+      port 8888
+      @label @main
+      @log_level debug
+      <parse>
+        @type json
+      </parse>
+    </source>