es cluster

parent a94d2ed00b, commit 4585b34136

efk/init.sh (14 lines changed)

@@ -1,11 +1,9 @@
cd /home/ubuntu/k8sCongfigs/traefik
microk8s.kubectl apply -f traefik_role.yaml
microk8s.kubectl apply -f traefik_role.yaml
microk8s.kubectl apply -f traefik_roleBind.yaml

cd /home/ubuntu/k8sCongfigs/efk
sudo su
# prepare the ES config
mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-data -p || true
@@ -14,6 +12,7 @@ cp config/* /var/snap/microk8s/common/mnt/data/elasticsearch-config -r

microk8s.ctr image import ~/shared/powerssd/images/docker/x86/fluentd-x86-image.tar
microk8s.ctr image import ~/shared/powerssd/images/docker/x86/elasticsearch-8-8-0.tar
microk8s.ctr image import ~/shared/powerssd/images/docker/x86/kibana-8-8-0.tar

# create the ES resources
microk8s.kubectl apply -f efk-namespace.yaml
@@ -22,7 +21,7 @@ microk8s.kubectl apply -f elasticsearch-ingress.yaml
microk8s.kubectl apply -f elasticsearch-pv.yaml
microk8s.kubectl apply -f elasticsearch-pvc.yaml
microk8s.kubectl apply -f elasticsearch-service.yaml
# the elasticsearch pod is being created at this point and has to pull its image, which takes an hour or more; if an offline image is available, import it directly instead
sleep 60
./createSecure_passwd_forES.sh
./createFluentdAccoutnIn.sh
@@ -38,10 +37,3 @@ microk8s.kubectl apply -f kibana-ingress.yaml
microk8s.kubectl apply -f kibana-service.yaml

./refreshTokenForKibana.sh
efk_cluster/README.md (new file, 73 lines)

@@ -0,0 +1,73 @@
## How to recreate the whole namespace

```
./init.sh
```

Everything below can be skipped; just run the command above.

First delete the namespace efk; that removes every resource inside it except the PVs.
Then run microk8s.kubectl create namespace efk


option 1:
1. Make sure the contents of config/ in this directory have been copied to /var/snap/microk8s/common/mnt/data/elasticsearch-config
- sudo mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-data -p || true
- sudo mkdir /var/snap/microk8s/common/mnt/data/elasticsearch-config -p || true
2. Deploy the resources in the efk namespace, i.e. every YAML in this directory that is not disabled
- chmod 777 /var/snap/microk8s/common/mnt/data/elasticsearch-config && chmod 777 /var/snap/microk8s/common/mnt/data/elasticsearch-data
- cp config/* /var/snap/microk8s/common/mnt/data/elasticsearch-config -r
- Install the elasticsearch resources first
- Run ./createSecure_passwd.sh
It is worth pausing here to check that the elasticsearch pods are healthy, otherwise the remaining steps are pointless: microk8s.kubectl get all -n efk
- Then install the fluentd resources
- Run ./createFluentdAccoutnIn.sh
- Finally install the kibana resources

--------

option 2:

1. After all the YAML files have been applied, run this:

```
./createSecure_passwd.sh
```

It creates
account: elastic
password: your_secure_password

2. Then create the service account for kibana and redeploy:

```
./refreshTokenForKibana.sh
```

3. Make sure the latest traefik load balancer IP is configured in the upstream block of /etc/nginx/nginx.conf.

--------------

Pick one of the two options above.
Whichever you choose, finish by browsing http://kibana.k8s.xunlang.home

In Kibana's dev tools, run:

```
PUT _index_template/logstash_template
{
  "index_patterns": ["logstash-*"],
  "template": {
    "settings": {
      "number_of_replicas": 0
    }
  }
}
```

If fluentd has already started shipping logs, delete the existing template and index first:

```
DELETE _template/logstash_template
DELETE /logstash-2024.11.09
```
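The index template above can also be pushed from the shell rather than the Kibana dev tools; a minimal sketch, assuming the elastic credentials created above and the same clusterIP lookup that backUpElasticSearch.sh uses:

```
ES_IP=$(microk8s.kubectl get service/elasticsearch -n efk -o json | jq -r '.spec.clusterIP')
curl -u elastic:your_secure_password -X PUT "http://${ES_IP}:9200/_index_template/logstash_template" \
  -H 'Content-Type: application/json' \
  -d '{"index_patterns": ["logstash-*"], "template": {"settings": {"number_of_replicas": 0}}}'
```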
efk_cluster/aa.txt (new file, 16 lines)

@@ -0,0 +1,16 @@
<source>
  @type http
  @id input_http
  port 8888
  tag sardine.log
</source>
<match sardine.log>
  @type elasticsearch
  @id output_elasticsearch
  host elasticsearch
  port 9200
  scheme http
  user fluentd_user
  password fluentd_password
  index_name logstash-sardine-%Y.%m.%d
</match>
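aa.txt is a fluentd pipeline: it accepts log events over HTTP on port 8888 and forwards them to Elasticsearch as fluentd_user. A quick smoke test of the HTTP input, assuming fluentd's in_http default JSON handling; the service address below is a placeholder:

```
# placeholder address: substitute the real fluentd service IP or DNS name
curl -X POST "http://fluentd.efk.svc.cluster.local:8888/sardine.log" \
  -H "Content-Type: application/json" \
  -d '{"message": "hello from curl"}'
```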
efk_cluster/aa.yaml (new file, 11 lines)

@@ -0,0 +1,11 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana-sample
spec:
  version: 8.15.1
  count: 3
  elasticsearchRef:
    name: "elasticsearch-sample"
  secureSettings:
    - secretName: kibana-secret-settings
efk_cluster/backUpElasticSearch.sh (new executable file, 68 lines)

@@ -0,0 +1,68 @@
#!/bin/bash

# date format, e.g. 2024-12-05-23
CURRENT_DATE=$(date "+%Y-%m-%d-%H")

# get the CLUSTER-IP of Elasticsearch
ELASTICSEARCH_IP=$(microk8s.kubectl get service/elasticsearch -n efk -o json | jq -r '.spec.clusterIP')

# debug: show the IP that was retrieved
echo "Elasticsearch IP: ${ELASTICSEARCH_IP}"

# exit if the IP could not be retrieved
if [ -z "$ELASTICSEARCH_IP" ]; then
  echo "Could not get the Elasticsearch IP address, exiting."
  exit 1
fi

# Elasticsearch username and password
USER="elastic"
PASSWORD="your_secure_password"

# try to delete this hour's snapshot if it already exists
DELETE_CURRENT_SNAPSHOT_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${CURRENT_DATE}"
echo "Trying to delete this hour's snapshot: ${DELETE_CURRENT_SNAPSHOT_URL}"

curl -u ${USER}:${PASSWORD} -X DELETE "${DELETE_CURRENT_SNAPSHOT_URL}"

# check whether the delete succeeded
if [ $? -eq 0 ]; then
  echo "Deleted this hour's snapshot: ${CURRENT_DATE}"
else
  echo "This hour's snapshot does not exist, continuing with creation."
fi

# create this hour's snapshot
BACKUP_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${CURRENT_DATE}"
echo "Snapshot request URL: ${BACKUP_URL}"

curl -u ${USER}:${PASSWORD} -X PUT "${BACKUP_URL}" -H 'Content-Type: application/json' -d '{
  "indices": "*",
  "ignore_unavailable": true,
  "include_global_state": false
}'

# check whether the snapshot succeeded
if [ $? -eq 0 ]; then
  echo "Snapshot succeeded: ${CURRENT_DATE}"

  # delete the snapshot from 7 days ago
  OLD_SNAPSHOT_DATE=$(date --date='7 days ago' "+%Y-%m-%d-%H")
  DELETE_URL="http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/snap-${OLD_SNAPSHOT_DATE}"

  # print the delete URL for debugging
  echo "Old snapshot delete URL: ${DELETE_URL}"

  # delete the old snapshot with curl
  curl -u ${USER}:${PASSWORD} -X DELETE "${DELETE_URL}"
  if [ $? -eq 0 ]; then
    echo "Deleted the snapshot from 7 days ago: ${OLD_SNAPSHOT_DATE}"
  else
    echo "Failed to delete the snapshot from 7 days ago: ${OLD_SNAPSHOT_DATE}"
  fi
else
  echo "Snapshot failed: ${CURRENT_DATE}"
fi
echo "Current snapshots:"
curl -u elastic:your_secure_password -X GET "http://${ELASTICSEARCH_IP}:9200/_snapshot/my_backup/_all" | jq -r '.snapshots[].snapshot'
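backUpElasticSearch.sh assumes a snapshot repository named my_backup already exists; its PUT and DELETE snapshot calls fail otherwise. A minimal registration sketch, assuming a filesystem repository whose location is listed under path.repo on the nodes (the path below is a placeholder):

```
ES_IP=$(microk8s.kubectl get service/elasticsearch -n efk -o json | jq -r '.spec.clusterIP')
curl -u elastic:your_secure_password -X PUT "http://${ES_IP}:9200/_snapshot/my_backup" \
  -H 'Content-Type: application/json' \
  -d '{"type": "fs", "settings": {"location": "/usr/share/elasticsearch/backup"}}'
```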
efk_cluster/config/elasticsearch-plugins.example.yml (new file, 27 lines)

@@ -0,0 +1,27 @@
# Rename this file to `elasticsearch-plugins.yml` to use it.
#
# All plugins must be listed here. If you add a plugin to this list and run
# `elasticsearch-plugin sync`, that plugin will be installed. If you remove
# a plugin from this list, that plugin will be removed when Elasticsearch
# next starts.

plugins:
  # Each plugin must have an ID. Plugins with only an ID are official plugins and will be downloaded from Elastic.
  - id: example-id

  # Plugins can be specified by URL (it doesn't have to be HTTP, you could use e.g. `file:`)
  - id: example-with-url
    location: https://some.domain/path/example4.zip

  # Or by maven coordinates:
  - id: example-with-maven-url
    location: org.elasticsearch.plugins:example-plugin:1.2.3

  # A proxy can also be configured per-plugin, if necessary
  - id: example-with-proxy
    location: https://some.domain/path/example.zip
    proxy: https://some.domain:1234

# Configures a proxy for all network access. Remove this if you don't need
# to use a proxy.
proxy: https://some.domain:1234
efk_cluster/config/elasticsearch.keystore (new binary file, not shown)
efk_cluster/config/elasticsearch.yml (new file, 2 lines)

@@ -0,0 +1,2 @@
cluster.name: "docker-cluster"
network.host: 0.0.0.0
efk_cluster/config/jvm.options (new file, 75 lines)

@@ -0,0 +1,75 @@
################################################################
##
## JVM configuration
##
################################################################
##
## WARNING: DO NOT EDIT THIS FILE. If you want to override the
## JVM options in this file, or set any additional options, you
## should create one or more files in the jvm.options.d
## directory containing your adjustments.
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/8.8/jvm-options.html
## for more information.
##
################################################################



################################################################
## IMPORTANT: JVM heap size
################################################################
##
## The heap size is automatically configured by Elasticsearch
## based on the available memory in your system and the roles
## each node is configured to fulfill. If specifying heap is
## required, it should be done through a file in jvm.options.d,
## which should be named with .options suffix, and the min and
## max should be set to the same value. For example, to set the
## heap to 4 GB, create a new file in the jvm.options.d
## directory containing these lines:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/8.8/heap-size.html
## for more information
##
################################################################


################################################################
## Expert settings
################################################################
##
## All settings below here are considered expert settings. Do
## not adjust them unless you understand what you are doing. Do
## not edit them in this file; instead, create a new file in the
## jvm.options.d directory containing your adjustments.
##
################################################################

-XX:+UseG1GC

## JVM temporary directory
-Djava.io.tmpdir=${ES_TMPDIR}

## heap dumps

# generate a heap dump when an allocation from the Java heap fails; heap dumps
# are created in the working directory of the JVM unless an alternative path is
# specified
-XX:+HeapDumpOnOutOfMemoryError

# exit right after heap dump on out of memory error
-XX:+ExitOnOutOfMemoryError

# specify an alternative path for heap dumps; ensure the directory exists and
# has sufficient space
-XX:HeapDumpPath=data

# specify an alternative path for JVM fatal error logs
-XX:ErrorFile=logs/hs_err_pid%p.log

## GC logging
-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,level,pid,tags:filecount=32,filesize=64m
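The comments above describe overriding the heap through jvm.options.d rather than editing this file. A sketch, assuming the config directory is the one init.sh populates on the host; note that the StatefulSet below also sets ES_JAVA_OPTS with heap flags, which would still take precedence for -Xms/-Xmx:

```
sudo mkdir -p /var/snap/microk8s/common/mnt/data/elasticsearch-config/jvm.options.d
printf -- '-Xms4g\n-Xmx4g\n' | \
  sudo tee /var/snap/microk8s/common/mnt/data/elasticsearch-config/jvm.options.d/heap.options
```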
efk_cluster/config/log4j2.file.properties (new file, 279 lines)

@@ -0,0 +1,279 @@
status = error

appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%consoleException%n

######## Server JSON ############################
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
appender.rolling.layout.type = ECSJsonLayout
appender.rolling.layout.dataset = elasticsearch.server

appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.fileIndex = nomax
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
################################################
######## Server - old style pattern ###########
appender.rolling_old.type = RollingFile
appender.rolling_old.name = rolling_old
appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling_old.layout.type = PatternLayout
appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
appender.rolling_old.policies.type = Policies
appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_old.policies.time.interval = 1
appender.rolling_old.policies.time.modulate = true
appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling_old.policies.size.size = 128MB
appender.rolling_old.strategy.type = DefaultRolloverStrategy
appender.rolling_old.strategy.fileIndex = nomax
appender.rolling_old.strategy.action.type = Delete
appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling_old.strategy.action.condition.type = IfFileName
appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
################################################

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_old.ref = rolling_old

######## Deprecation JSON #######################
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
appender.deprecation_rolling.layout.type = ECSJsonLayout
# Intentionally follows a different pattern to above
appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter

appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4

appender.header_warning.type = HeaderWarningAppender
appender.header_warning.name = header_warning
#################################################

logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = WARN
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.appenderRef.header_warning.ref = header_warning
logger.deprecation.additivity = false

######## Search slowlog JSON ####################
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
  .cluster_name}_index_search_slowlog.json
appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog

appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
  .cluster_name}_index_search_slowlog-%i.json.gz
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.size.size = 1GB
appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_search_slowlog_rolling.strategy.max = 4
#################################################

#################################################
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false

######## Indexing slowlog JSON ##################
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_indexing_slowlog.json
appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog


appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_indexing_slowlog-%i.json.gz
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_indexing_slowlog_rolling.strategy.max = 4
#################################################


logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false


logger.org_apache_pdfbox.name = org.apache.pdfbox
logger.org_apache_pdfbox.level = off

logger.org_apache_poi.name = org.apache.poi
logger.org_apache_poi.level = off

logger.org_apache_fontbox.name = org.apache.fontbox
logger.org_apache_fontbox.level = off

logger.org_apache_xmlbeans.name = org.apache.xmlbeans
logger.org_apache_xmlbeans.level = off


logger.com_amazonaws.name = com.amazonaws
logger.com_amazonaws.level = warn

logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error

logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
logger.com_amazonaws_metrics_AwsSdkMetrics.level = error

logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader
logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error

logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver
logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error


appender.audit_rolling.type = RollingFile
appender.audit_rolling.name = audit_rolling
appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = {\
"type":"audit", \
"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
%varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\
%varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\
%varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
%varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
%varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
%varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
%varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
%varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
%varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
%varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
%varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
%varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
%varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
%varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.roles":%map{user.roles}}\
%varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
%varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
%varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\
%varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
%varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
%varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
%varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\
%varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
%varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
%varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
%varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
%varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
%varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
%varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
%varsNotEmpty{, "indices":%map{indices}}\
%varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
%varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
%varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
%varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
%varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
%varsNotEmpty{, "put":%map{put}}\
%varsNotEmpty{, "delete":%map{delete}}\
%varsNotEmpty{, "change":%map{change}}\
%varsNotEmpty{, "create":%map{create}}\
%varsNotEmpty{, "invalidate":%map{invalidate}}\
}%n
# "node.name" node name from the `elasticsearch.yml` settings
# "node.id" node id which should not change between cluster restarts
# "host.name" unresolved hostname of the local node
# "host.ip" the local bound ip (i.e. the ip listening for connections)
# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
# "user.name" the subject name as authenticated by a realm
# "user.run_by.name" the original authenticated subject name that is impersonating another one.
# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
# "user.realm" the name of the realm that authenticated "user.name"
# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain
# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain
# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain
# "user.roles" the roles array of the user; these are the roles that are granting privileges
# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster
# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
# "realm_domain" if "realm" is under a domain, this is the name of the domain
# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
# "request.body" the content of the request body entity, JSON escaped
# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
# "indices" the array of indices that the "action" is acting upon
# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
# "trace_id" an identifier conveyed by the part of "traceparent" request header
# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
# "rule" name of the applied rule if the "origin.type" is "ip_filter"
# the "put", "delete", "change", "create", "invalidate" fields are only present
# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect

appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}-%i.json.gz
appender.audit_rolling.policies.type = Policies
appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.audit_rolling.policies.time.interval = 1
appender.audit_rolling.policies.time.modulate = true
appender.audit_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.audit_rolling.policies.size.size = 1GB
appender.audit_rolling.strategy.type = DefaultRolloverStrategy
appender.audit_rolling.strategy.fileIndex = nomax

logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
logger.xpack_security_audit_logfile.level = info
logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
logger.xpack_security_audit_logfile.additivity = false

logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
logger.xmlsig.level = error
logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
logger.samlxml_decrypt.level = fatal
logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
logger.saml2_decrypt.level = fatal
efk_cluster/config/log4j2.properties (new file, 193 lines)

@@ -0,0 +1,193 @@
status = error

######## Server JSON ############################
appender.rolling.type = Console
appender.rolling.name = rolling
appender.rolling.layout.type = ECSJsonLayout
appender.rolling.layout.dataset = elasticsearch.server

################################################

################################################

rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling

######## Deprecation JSON #######################
appender.deprecation_rolling.type = Console
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.layout.type = ECSJsonLayout
# Intentionally follows a different pattern to above
appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter

appender.header_warning.type = HeaderWarningAppender
appender.header_warning.name = header_warning
#################################################

logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = WARN
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.appenderRef.header_warning.ref = header_warning
logger.deprecation.additivity = false

######## Search slowlog JSON ####################
appender.index_search_slowlog_rolling.type = Console
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog

#################################################

#################################################
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false

######## Indexing slowlog JSON ##################
appender.index_indexing_slowlog_rolling.type = Console
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog

#################################################

logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false

logger.org_apache_pdfbox.name = org.apache.pdfbox
logger.org_apache_pdfbox.level = off

logger.org_apache_poi.name = org.apache.poi
logger.org_apache_poi.level = off

logger.org_apache_fontbox.name = org.apache.fontbox
logger.org_apache_fontbox.level = off

logger.org_apache_xmlbeans.name = org.apache.xmlbeans
logger.org_apache_xmlbeans.level = off

logger.com_amazonaws.name = com.amazonaws
logger.com_amazonaws.level = warn

logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error

logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
logger.com_amazonaws_metrics_AwsSdkMetrics.level = error

logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader
logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error

logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver
logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error

appender.audit_rolling.type = Console
appender.audit_rolling.name = audit_rolling
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = {\
"type":"audit", \
"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
%varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\
%varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\
%varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
%varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
%varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
%varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
%varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
%varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
%varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
%varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
%varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
%varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
%varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
%varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.roles":%map{user.roles}}\
%varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
%varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
%varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\
%varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
%varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
%varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
%varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\
%varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
%varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
%varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
%varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
%varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
%varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
%varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
%varsNotEmpty{, "indices":%map{indices}}\
%varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
%varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
%varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
%varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
%varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
%varsNotEmpty{, "put":%map{put}}\
%varsNotEmpty{, "delete":%map{delete}}\
%varsNotEmpty{, "change":%map{change}}\
%varsNotEmpty{, "create":%map{create}}\
%varsNotEmpty{, "invalidate":%map{invalidate}}\
}%n
# "node.name" node name from the `elasticsearch.yml` settings
# "node.id" node id which should not change between cluster restarts
# "host.name" unresolved hostname of the local node
# "host.ip" the local bound ip (i.e. the ip listening for connections)
# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
# "user.name" the subject name as authenticated by a realm
# "user.run_by.name" the original authenticated subject name that is impersonating another one.
# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
# "user.realm" the name of the realm that authenticated "user.name"
# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain
# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain
# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain
# "user.roles" the roles array of the user; these are the roles that are granting privileges
# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster
# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
# "realm_domain" if "realm" is under a domain, this is the name of the domain
# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
# "request.body" the content of the request body entity, JSON escaped
# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
# "indices" the array of indices that the "action" is acting upon
# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
# "trace_id" an identifier conveyed by the part of "traceparent" request header
# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
# "rule" name of the applied rule if the "origin.type" is "ip_filter"
# the "put", "delete", "change", "create", "invalidate" fields are only present
# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect

logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
logger.xpack_security_audit_logfile.level = info
logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
logger.xpack_security_audit_logfile.additivity = false

logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
logger.xmlsig.level = error
logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
logger.samlxml_decrypt.level = fatal
logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
logger.saml2_decrypt.level = fatal
efk_cluster/config/role_mapping.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
# Role mapping configuration file which has elasticsearch roles as keys
# that map to one or more user or group distinguished names

#roleA:   this is an elasticsearch role
#  - groupA-DN  this is a group distinguished name
#  - groupB-DN
#  - user1-DN   this is the full user distinguished name

#power_user:
#  - "cn=admins,dc=example,dc=com"
#user:
#  - "cn=users,dc=example,dc=com"
#  - "cn=admins,dc=example,dc=com"
#  - "cn=John Doe,cn=other users,dc=example,dc=com"
efk_cluster/config/roles.yml (new file, 3 lines)

@@ -0,0 +1,3 @@
# The default roles file is empty as the preferred method of defining roles is
# through the API/UI. File based roles are useful in error scenarios when the
# API based roles may not be available.
efk_cluster/config/service_tokens (new file, 1 line)

@@ -0,0 +1 @@
elastic/kibana/my-token:{PBKDF2_STRETCH}10000$hqtLfPNEvoHIGGMhgpcrdTNCaXBqHGIOAj7ndDmt8w8=$073Kw/8neGbcNJQAi37DhyEKiIvYIM4MkzvuflndCbg=
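This file holds the hashed service-account token that Kibana authenticates with. A token of this form can be regenerated with the CLI bundled in the Elasticsearch image; a sketch, assuming the hot node's pod is named elasticsearch-0-0 (the pod name is an assumption, check with microk8s.kubectl get pods -n efk):

```
# writes the new token hash into config/service_tokens inside the container
microk8s.kubectl exec -n efk elasticsearch-0-0 -- \
  bin/elasticsearch-service-tokens create elastic/kibana my-token
```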
efk_cluster/config/users (new file, empty)

efk_cluster/config/users_roles (new file, empty)
efk_cluster/createFluentdAccoutnIn.sh (new executable file, 32 lines)

@@ -0,0 +1,32 @@
#!/bin/bash
ELASTIC_PASSWORD="your_secure_password"
ELASTIC_HOST=$(microk8s.kubectl get svc elasticsearch -n efk -o wide | awk 'NR==2 {print $3}')
echo $ELASTIC_HOST

curl -X PUT "http://${ELASTIC_HOST}:9200/_security/role/fluentd_writer" \
  -u elastic:$ELASTIC_PASSWORD \
  -H "Content-Type: application/json" \
  -d '{
    "cluster": ["monitor"],
    "indices": [
      {
        "names": ["logstash-*"],
        "privileges": ["write", "create_index"]
      }
    ]
  }'
echo "\n"
curl -X PUT "http://$ELASTIC_HOST:9200/_security/user/fluentd_user" \
  -u elastic:$ELASTIC_PASSWORD \
  -H "Content-Type: application/json" \
  -d '{
    "password": "fluentd_password",
    "roles": ["fluentd_writer"]
  }'


echo "\n"
curl -X GET "http://$ELASTIC_HOST:9200/_security/user/fluentd_user" \
  -u elastic:$ELASTIC_PASSWORD

echo "\n"
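To confirm that the role actually permits writes, a test document can be indexed as fluentd_user; a sketch reusing the ELASTIC_HOST lookup above, with a throwaway index name:

```
curl -u fluentd_user:fluentd_password -X POST "http://${ELASTIC_HOST}:9200/logstash-test/_doc" \
  -H "Content-Type: application/json" \
  -d '{"message": "fluentd role check"}'
```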
efk_cluster/createSecure_passwd_forES.sh (new executable file, 4 lines)

@@ -0,0 +1,4 @@
#!/bin/bash

microk8s kubectl create secret generic elasticsearch-secret -n efk \
  --from-literal=elastic_password='your_secure_password'
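The password the pods will read from this secret can be checked by decoding it directly:

```
microk8s kubectl get secret elasticsearch-secret -n efk \
  -o jsonpath='{.data.elastic_password}' | base64 -d; echo
```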
efk_cluster/efk-namespace.yaml (new file, 5 lines)

@@ -0,0 +1,5 @@
# efk-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: efk
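This namespace is the first resource init.sh applies; it can be applied and verified on its own:

```
microk8s.kubectl apply -f efk-namespace.yaml
microk8s.kubectl get namespace efk
```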
efk_cluster/elasticsearch-cluster.yaml (new file, 342 lines)

@@ -0,0 +1,342 @@
# PVC definitions (they already exist; no need to create them again)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-0
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 300Gi
  volumeName: elasticsearch-hot-pv
  storageClassName: ""

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-1
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 128Gi
  volumeName: elasticsearch-warm-pv
  storageClassName: ""

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-2
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
  volumeName: elasticsearch-cold-pv
  storageClassName: ""

---
# StatefulSet for elasticsearch-0 (hot)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-0
  namespace: efk
spec:
  serviceName: elasticsearch-service
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      initContainers:
        - name: init-sysctl
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        - name: init-dirs
          image: busybox
          command: ["sh", "-c", "mkdir -p /data/data /data/config"]
          securityContext:
            runAsUser: 0 # run as root
          volumeMounts:
            - name: data
              mountPath: /data
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
          securityContext:
            runAsUser: 0 # run as root
          ports:
            - containerPort: 9200
              name: http
            - containerPort: 9300
              name: transport
          env:
            - name: cluster.name
              value: "my-es-cluster"
            - name: node.name
              value: "elasticsearch-0"
            - name: discovery.seed_hosts
              value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
            - name: cluster.initial_master_nodes
              value: "elasticsearch-0"
            - name: xpack.security.enabled
              value: "true"
            - name: ELASTIC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: elasticsearch-secret
                  key: elastic_password
            - name: ES_JAVA_OPTS
              value: "-Xms2g -Xmx4g"
            - name: node.roles
              value: "[master, data]"
            - name: node.attr.data-tier
              value: "hot"
          resources:
            requests:
              memory: "4Gi"
              cpu: "1"
            limits:
              memory: "6Gi"
              cpu: "2"
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
              subPath: data
            - name: data
              mountPath: /usr/share/elasticsearch/config
              subPath: config
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values: ["elasticsearch"]
              topologyKey: "kubernetes.io/hostname"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "elasticsearch-data-pvc-elasticsearch-0"
---
# StatefulSet for elasticsearch-1 (warm)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-1
  namespace: efk
spec:
  serviceName: elasticsearch-service
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      initContainers:
        - name: init-sysctl
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        - name: init-dirs
          image: busybox
          command: ["sh", "-c", "mkdir -p /data/data /data/config"]
          securityContext:
            runAsUser: 0 # run as root
          volumeMounts:
            - name: data
              mountPath: /data
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
          securityContext:
            runAsUser: 0 # run as root
          ports:
            - containerPort: 9200
              name: http
            - containerPort: 9300
              name: transport
          env:
            - name: cluster.name
              value: "my-es-cluster"
            - name: node.name
              value: "elasticsearch-1"
            - name: discovery.seed_hosts
              value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
            - name: cluster.initial_master_nodes
              value: "elasticsearch-0"
            - name: xpack.security.enabled
              value: "true"
            - name: ELASTIC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: elasticsearch-secret
                  key: elastic_password
            - name: ES_JAVA_OPTS
              value: "-Xms2g -Xmx4g"
            - name: node.roles
              value: "[data]"
            - name: node.attr.data-tier
              value: "warm"
          resources:
            requests:
              memory: "4Gi"
              cpu: "1"
            limits:
              memory: "6Gi"
              cpu: "2"
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
              subPath: data
            - name: data
              mountPath: /usr/share/elasticsearch/config
              subPath: config
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values: ["elasticsearch"]
              topologyKey: "kubernetes.io/hostname"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "elasticsearch-data-pvc-elasticsearch-1"
---
# StatefulSet for elasticsearch-2 (cold)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-2
  namespace: efk
spec:
  serviceName: elasticsearch-service
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      initContainers:
        - name: init-sysctl
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        - name: init-dirs
          image: busybox
          command: ["sh", "-c", "mkdir -p /data/data /data/config"]
          securityContext:
            runAsUser: 0 # run as root
          volumeMounts:
            - name: data
              mountPath: /data
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
          securityContext:
            runAsUser: 0 # run as root
          ports:
            - containerPort: 9200
              name: http
            - containerPort: 9300
              name: transport
          env:
            - name: cluster.name
              value: "my-es-cluster"
            - name: node.name
              value: "elasticsearch-2"
            - name: discovery.seed_hosts
              value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
            - name: cluster.initial_master_nodes
              value: "elasticsearch-0"
            - name: xpack.security.enabled
|
||||||
|
value: "true"
|
||||||
|
- name: ELASTIC_PASSWORD
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: elasticsearch-secret
|
||||||
|
key: elastic_password
|
||||||
|
- name: ES_JAVA_OPTS
|
||||||
|
value: "-Xms2g -Xmx4g"
|
||||||
|
- name: node.roles
|
||||||
|
value: "[data]"
|
||||||
|
- name: node.attr.data-tier
|
||||||
|
value: "cold"
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "4Gi"
|
||||||
|
cpu: "1"
|
||||||
|
limits:
|
||||||
|
memory: "6Gi"
|
||||||
|
cpu: "2"
|
||||||
|
volumeMounts:
|
||||||
|
- name: data
|
||||||
|
mountPath: /usr/share/elasticsearch/data
|
||||||
|
subPath: data
|
||||||
|
- name: data
|
||||||
|
mountPath: /usr/share/elasticsearch/config
|
||||||
|
subPath: config
|
||||||
|
affinity:
|
||||||
|
podAntiAffinity:
|
||||||
|
requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
- labelSelector:
|
||||||
|
matchExpressions:
|
||||||
|
- key: app
|
||||||
|
operator: In
|
||||||
|
values: ["elasticsearch"]
|
||||||
|
topologyKey: "kubernetes.io/hostname"
|
||||||
|
volumes:
|
||||||
|
- name: data
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: "elasticsearch-data-pvc-elasticsearch-2"
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: elasticsearch-service
|
||||||
|
namespace: efk
|
||||||
|
spec:
|
||||||
|
clusterIP: None
|
||||||
|
selector:
|
||||||
|
app: elasticsearch
|
||||||
|
ports:
|
||||||
|
- port: 9200
|
||||||
|
name: http
|
||||||
|
- port: 9300
|
||||||
|
name: transport
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: elasticsearch-secret
|
||||||
|
namespace: efk
|
||||||
|
type: Opaque
|
||||||
|
data:
|
||||||
|
elastic_password: bXluZXdwYXNzd29yZA== # 示例密码 "mynewpassword"
|
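A quick way to produce the base64 value stored in elastic_password and to confirm the cluster resources exist afterwards (a minimal sketch; the password shown is only the example value from the Secret above):

# encode a replacement password; -n keeps the trailing newline out of the encoded value
echo -n 'mynewpassword' | base64
# after applying the manifest, the three data-tier pods and the Secret should be present
microk8s.kubectl get pods -n efk -l app=elasticsearch
microk8s.kubectl get secret elasticsearch-secret -n efk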
13
efk_cluster/elasticsearch-configMap.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: es-config
  namespace: efk
data:
  node.roles.elasticsearch-0: "[master, data]"
  node.roles.elasticsearch-1: "[data]"
  node.roles.elasticsearch-2: "[data]"
  node.attr.data-tier.elasticsearch-0: "hot"
  node.attr.data-tier.elasticsearch-1: "warm"
  node.attr.data-tier.elasticsearch-2: "cold"
55
efk_cluster/elasticsearch-deployment.yaml
Normal file
@@ -0,0 +1,55 @@
# elasticsearch-deployment-8.8.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch
  namespace: efk
  labels:
    app: elasticsearch
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
          ports:
            - containerPort: 9200
            - containerPort: 9300
          env:
            - name: discovery.type
              value: "single-node"
            - name: xpack.security.enabled
              value: "true"
            - name: ELASTIC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: elasticsearch-secret
                  key: elastic_password
            - name: ES_JAVA_OPTS
              value: "-Xms2g -Xmx4g" # set the JVM heap to 2 GB - 4 GB
          resources: # add resource requests/limits
            requests:
              memory: "4Gi"
              cpu: "1"
            limits:
              memory: "6Gi"
              cpu: "2"
          volumeMounts:
            - name: data-volume
              mountPath: /usr/share/elasticsearch/data
            - name: config-volume
              mountPath: /usr/share/elasticsearch/config
      volumes:
        - name: data-volume
          persistentVolumeClaim:
            claimName: elasticsearch-data-pvc
        - name: config-volume
          persistentVolumeClaim:
            claimName: elasticsearch-config-pvc
18
efk_cluster/elasticsearch-ingress.yaml
Normal file
@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: elasticsearch
  namespace: efk
spec:
  ingressClassName: traefik
  rules:
    - host: elastic.k8s.xunlang.home
      http:
        paths:
          - backend:
              service:
                name: elasticsearch # points to the new Service
                port:
                  number: 9200
            path: /
            pathType: Prefix
25
efk_cluster/elasticsearch-pv.yaml
Normal file
@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: elasticsearch-data-pv
spec:
  capacity:
    storage: 300Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: microk8s-hostpath
  hostPath:
    path: /var/snap/microk8s/common/mnt/data/elasticsearch-data
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: elasticsearch-config-pv
spec:
  capacity:
    storage: 10Mi
  accessModes:
    - ReadWriteOnce
  storageClassName: microk8s-hostpath
  hostPath:
    path: /var/snap/microk8s/common/mnt/data/elasticsearch-config
91
efk_cluster/elasticsearch-pvandpvc_incluster.yaml
Normal file
@@ -0,0 +1,91 @@
# PV for the hot node
apiVersion: v1
kind: PersistentVolume
metadata:
  name: elasticsearch-hot-pv
spec:
  capacity:
    storage: 300Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.71.142.1 # fenny
    path: /root/elasticsearch/hotData

---
# PV for the warm node
apiVersion: v1
kind: PersistentVolume
metadata:
  name: elasticsearch-warm-pv
spec:
  capacity:
    storage: 128Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.71.142.1 # fenny
    path: /root/elasticsearch/warmData

---
# PV for the cold node
apiVersion: v1
kind: PersistentVolume
metadata:
  name: elasticsearch-cold-pv
spec:
  capacity:
    storage: 500Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.71.142.1 # fenny
    path: /root/elasticsearch/coldData

---
# PVC for Hot
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-0
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 300Gi
  volumeName: elasticsearch-hot-pv
  storageClassName: "" # explicitly disable StorageClass binding

---
# PVC for Warm
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-1
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 128Gi
  volumeName: elasticsearch-warm-pv
  storageClassName: "" # explicitly disable StorageClass binding

---
# PVC for Cold
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-2
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
  volumeName: elasticsearch-cold-pv
  storageClassName: "" # explicitly disable StorageClass binding
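These PVs assume the exported directories under /root/elasticsearch already exist on the NFS server 10.71.142.1 and that every node can mount NFS. A quick check that the static PV/PVC binding worked (a sketch):

microk8s.kubectl get pv
microk8s.kubectl get pvc -n efk   # each claim should report STATUS Bound to its matching PV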
43
efk_cluster/elasticsearch-pvc.yaml
Normal file
@@ -0,0 +1,43 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-0
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 300Gi
  volumeName: elasticsearch-hot-pv
  storageClassName: "" # disable the default StorageClass

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-1
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 128Gi
  volumeName: elasticsearch-warm-pv
  storageClassName: ""

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data-pvc-elasticsearch-2
  namespace: efk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
  volumeName: elasticsearch-cold-pv
  storageClassName: ""
8
efk_cluster/elasticsearch-secret.yaml
Normal file
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: elasticsearch-secret
  namespace: efk
type: Opaque
data:
  elastic_password: eW91cl9zZWN1cmVfcGFzc3dvcmQ= # Base64-encoded "your_secure_password"
13
efk_cluster/elasticsearch-service.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: efk
spec:
  ports:
    - port: 9200
      targetPort: 9200
      name: http
  selector:
    statefulset.kubernetes.io/pod-name: elasticsearch-0 # select only elasticsearch-0
  type: LoadBalancer # keep the LoadBalancer type
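A quick way to confirm the pinned Service is reachable (a sketch; <external-ip> and <password> are placeholders for the address the LoadBalancer was given and the value stored in elasticsearch-secret):

microk8s.kubectl get svc elasticsearch -n efk
curl -u elastic:<password> http://<external-ip>:9200/_cluster/health?pretty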
269
efk_cluster/elasticsearch-statefulSet.yaml
Normal file
@@ -0,0 +1,269 @@
# elasticsearch-0
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-0
  namespace: efk
spec:
  serviceName: elasticsearch-service
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      initContainers:
        - name: init-sysctl
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
          ports:
            - containerPort: 9200
              name: http
            - containerPort: 9300
              name: transport
          env:
            - name: cluster.name
              value: "my-es-cluster"
            - name: node.name
              value: "elasticsearch-0"
            - name: discovery.seed_hosts
              value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
            - name: cluster.initial_master_nodes
              value: "elasticsearch-0"
            - name: xpack.security.enabled
              value: "true"
            - name: ELASTIC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: elasticsearch-secret
                  key: elastic_password
            - name: ES_JAVA_OPTS
              value: "-Xms2g -Xmx4g"
            - name: node.roles
              value: "[master, data]"
            - name: node.attr.data-tier
              value: "hot"
          resources:
            requests:
              memory: "4Gi"
              cpu: "1"
            limits:
              memory: "6Gi"
              cpu: "2"
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
            - name: config-volume
              mountPath: /usr/share/elasticsearch/config
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values: ["elasticsearch"]
              topologyKey: "kubernetes.io/hostname"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "elasticsearch-data-pvc-elasticsearch-0"
        - name: config-volume
          persistentVolumeClaim:
            claimName: "elasticsearch-config-pvc"
---
# elasticsearch-1
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-1
  namespace: efk
spec:
  serviceName: elasticsearch-service
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      initContainers:
        - name: init-sysctl
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
          ports:
            - containerPort: 9200
              name: http
            - containerPort: 9300
              name: transport
          env:
            - name: cluster.name
              value: "my-es-cluster"
            - name: node.name
              value: "elasticsearch-1"
            - name: discovery.seed_hosts
              value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
            - name: cluster.initial_master_nodes
              value: "elasticsearch-0"
            - name: xpack.security.enabled
              value: "true"
            - name: ELASTIC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: elasticsearch-secret
                  key: elastic_password
            - name: ES_JAVA_OPTS
              value: "-Xms2g -Xmx4g"
            - name: node.roles
              value: "[data]"
            - name: node.attr.data-tier
              value: "warm"
          resources:
            requests:
              memory: "4Gi"
              cpu: "1"
            limits:
              memory: "6Gi"
              cpu: "2"
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
            - name: config-volume
              mountPath: /usr/share/elasticsearch/config
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values: ["elasticsearch"]
              topologyKey: "kubernetes.io/hostname"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "elasticsearch-data-pvc-elasticsearch-1"
        - name: config-volume
          persistentVolumeClaim:
            claimName: "elasticsearch-config-pvc"
---
# elasticsearch-2
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-2
  namespace: efk
spec:
  serviceName: elasticsearch-service
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      initContainers:
        - name: init-sysctl
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
          ports:
            - containerPort: 9200
              name: http
            - containerPort: 9300
              name: transport
          env:
            - name: cluster.name
              value: "my-es-cluster"
            - name: node.name
              value: "elasticsearch-2"
            - name: discovery.seed_hosts
              value: "elasticsearch-0.elasticsearch-service.efk.svc.cluster.local,elasticsearch-1.elasticsearch-service.efk.svc.cluster.local,elasticsearch-2.elasticsearch-service.efk.svc.cluster.local"
            - name: cluster.initial_master_nodes
              value: "elasticsearch-0"
            - name: xpack.security.enabled
              value: "true"
            - name: ELASTIC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: elasticsearch-secret
                  key: elastic_password
            - name: ES_JAVA_OPTS
              value: "-Xms2g -Xmx4g"
            - name: node.roles
              value: "[data]"
            - name: node.attr.data-tier
              value: "cold"
          resources:
            requests:
              memory: "4Gi"
              cpu: "1"
            limits:
              memory: "6Gi"
              cpu: "2"
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
            - name: config-volume
              mountPath: /usr/share/elasticsearch/config
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values: ["elasticsearch"]
              topologyKey: "kubernetes.io/hostname"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: "elasticsearch-data-pvc-elasticsearch-2"
        - name: config-volume
          persistentVolumeClaim:
            claimName: "elasticsearch-config-pvc"
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-service
  namespace: efk
spec:
  clusterIP: None
  selector:
    app: elasticsearch
  ports:
    - port: 9200
      name: http
    - port: 9300
      name: transport
---
apiVersion: v1
kind: Secret
metadata:
  name: elasticsearch-secret
  namespace: efk
type: Opaque
data:
  elastic_password: eW91cl9zZWN1cmVfcGFzc3dvcmQ= # Base64-encoded "your_secure_password"
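Once the three pods are Running, the cluster can be checked from inside one of them (a sketch; elasticsearch-0-0 is the pod created by the StatefulSet named elasticsearch-0, and the password is the example value from elasticsearch-secret):

microk8s.kubectl exec -n efk elasticsearch-0-0 -- \
  curl -s -u elastic:your_secure_password http://localhost:9200/_cat/nodes?v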
62
efk_cluster/fileBeat-config.yaml
Normal file
@@ -0,0 +1,62 @@
# filebeat-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: efk
data:
  filebeat.yml: |
    filebeat.inputs:
    - type: http_endpoint
      enabled: true
      listen_address: 0.0.0.0:8888
      path: "/"
      json.keys_under_root: true

    processors:
      # extract the variables embedded in the path (strict mode)
      - dissect:
          tokenizer: "/tanya.candle.%{currency}.%{year}.%{interval}"
          field: "http.request.path"
          target_prefix: ""
          ignore_missing: false # key point: do not ignore missing fields

      # force default values (even when a field is empty)
      - script:
          lang: javascript
          source: |
            function process(event) {
              // check whether the field exists first; if not, set a default value
              if (!event.containsKey('currency') || event.get('currency') === '') {
                event.put('currency', 'unknown');
              }
              if (!event.containsKey('year') || event.get('year') === '') {
                event.put('year', '0000');
              }
              if (!event.containsKey('interval') || event.get('interval') === '') {
                event.put('interval', '0D');
              }
            }

    output.elasticsearch:
      hosts: ["http://elasticsearch:9200"]
      username: "fluentd_user"
      password: "fluentd_password"
      indices:
        - index: "logstash-candle-${currency}-${year}-${interval}"
          # strictly verify that the field values are non-empty
          when.and:
            - not.equals:
                currency: ""
            - not.equals:
                year: ""
            - not.equals:
                interval: ""
        - index: "fallback-index"
          when.or:
            - equals:
                currency: ""
            - equals:
                year: ""
            - equals:
                interval: ""
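An example request against the http_endpoint input (a sketch; <filebeat-service-ip> is a placeholder for the LoadBalancer address of filebeat-service below, and it assumes the request path reaches Filebeat unrewritten so the dissect tokenizer can split it into currency=BTC, year=2024, interval=1D):

curl -X POST "http://<filebeat-service-ip>:8888/tanya.candle.BTC.2024.1D" \
     -H "Content-Type: application/json" \
     -d '{"open": 1.0, "close": 2.0}'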
38
efk_cluster/fileBeat-daemonset.yaml
Normal file
@@ -0,0 +1,38 @@
# filebeat-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: efk
spec:
  selector:
    matchLabels:
      app: filebeat
  template:
    metadata:
      labels:
        app: filebeat
    spec:
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:8.12.0
          args: [
            "-c", "/etc/filebeat.yml",
            "-e",
            "-strict.perms=false"
          ]
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: config
          configMap:
            name: filebeat-config
        - name: varlog
          hostPath:
            path: /var/log
22
efk_cluster/fileBeat-ingress.yaml
Normal file
@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: filebeat-ingress
  namespace: efk
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    # add the SSL-termination annotation below if TLS/SSL support is needed
    # nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: traefik
  rules:
    - host: filebeat.k8s.xunlang.home
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: filebeat-service
                port:
                  number: 8888
14
efk_cluster/fileBeat-service.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: filebeat-service
  namespace: efk
spec:
  type: LoadBalancer
  selector:
    app: filebeat # must match the pod labels set by the Filebeat DaemonSet
  ports:
    - protocol: TCP
      port: 8888       # port exposed by the Service
      targetPort: 8888 # port inside the Filebeat container

46
efk_cluster/fluentd-configMap.yaml
Normal file
@@ -0,0 +1,46 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: fluentd-config
  namespace: efk
data:
  fluent.conf: |
    <source>
      @type http
      @id input_http
      port 8888
      tag sardine.log
      @label @main
    </source>

    <label @main>
      <match sardine.log.**>
        @type copy
        <store>
          @type elasticsearch
          @id output_elasticsearch
          host elasticsearch
          port 9200
          scheme http
          user fluentd_user
          password fluentd_password
          logstash_format true
          logstash_prefix logstash
          logstash_dateformat %Y.%m.%d
          flush_interval 5s # shorten the bulk flush interval
          @log_level debug
          id_key _id
          remove_keys _id
        </store>
        <store>
          @type stdout
          @id output_stdout
        </store>
      </match>
    </label>

    <match **>
      @type stdout
      @id output_stdout_all
    </match>
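A minimal way to exercise the HTTP source once the DaemonSet, Service and Ingress are up (a sketch; with in_http the tag comes from the request path, so this event matches sardine.log.**; the record fields are arbitrary):

curl -X POST "http://fluentd.k8s.xunlang.home/sardine.log.test" \
     -H "Content-Type: application/json" \
     -d '{"level": "info", "message": "hello from curl"}'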
46
efk_cluster/fluentd-configMap2.yaml
Normal file
@@ -0,0 +1,46 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: fluentd-config
  namespace: efk
data:
  fluent.conf: |
    <source>
      @type http
      @id input_http
      port 8888
      @label @main
      @log_level debug
      <parse>
        @type json
      </parse>
    </source>

    <label @main>
      <match sardine.log.**>
        @type copy
        <store>
          @type elasticsearch
          @id output_elasticsearch
          host elasticsearch
          port 9200
          scheme http
          user fluentd_user
          password fluentd_password
          logstash_format true
          logstash_prefix logstash
          logstash_dateformat %Y.%m.%d
          flush_interval 5s
          @log_level debug
          id_key _id
          remove_keys _id
        </store>
        <store>
          @type stdout
          @id output_stdout
        </store>
      </match>
    </label>
    <match **>
      @type stdout
      @id output_stdout_all
    </match>
56
efk_cluster/fluentd-daemonset.yaml
Normal file
@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: efk
  labels:
    k8s-app: fluentd-logging
    version: v1
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-logging
      version: v1
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
    spec:
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      containers:
        - name: fluentd
          image: fluent/fluentd-kubernetes-daemonset:v1.17.1-debian-elasticsearch8-1.0
          env:
            - name: K8S_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: TZ
              value: Asia/Shanghai # set the timezone to UTC+8
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 200Mi
          volumeMounts:
            - name: fluentd-config-volume
              mountPath: /fluentd/etc/fluent.conf
              subPath: fluent.conf
            - name: timezone
              mountPath: /etc/localtime
              readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
        - name: fluentd-config-volume
          configMap:
            name: fluentd-config
        - name: timezone
          hostPath:
            path: /usr/share/zoneinfo/Asia/Shanghai # mount the UTC+8 zoneinfo file

28
efk_cluster/fluentd-ingress.yaml
Normal file
@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: fluentd-ingress
  namespace: efk
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    # add the SSL-termination annotation below if TLS/SSL support is needed
    # nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: traefik
  rules:
    - host: fluentd.k8s.xunlang.home
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: fluentd-service
                port:
                  number: 8888
  # additional configuration is required if TLS/SSL should be enabled
  # tls:
  #   - hosts:
  #       - fluentd.k8s.xunlang.home
  #     secretName: fluentd-tls-secret # the secret used for TLS

22
efk_cluster/fluentd-ingress2.yaml_disabled
Normal file
@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: fluentd-ingress
  namespace: kube-system
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    # uncomment the annotation below if TLS/SSL support is needed
    # nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  rules:
    - host: fluentd.k8s.xunlang.home
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: fluentd-service # your Fluentd Service
                port:
                  number: 8888 # port inside the Fluentd container

14
efk_cluster/fluentd-service.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: fluentd-service
  namespace: efk
spec:
  type: LoadBalancer
  selector:
    k8s-app: fluentd-logging
  ports:
    - protocol: TCP
      port: 8888       # port exposed by the Service
      targetPort: 8888 # port inside the Fluentd container

39
efk_cluster/fluentd.conf
Normal file
@@ -0,0 +1,39 @@
<source>
  @type http
  @id input_http
  port 8888
  tag sardine.log
  @label @main
</source>

<label @main>
  <match sardine.log.**>
    @type copy
    <store>
      @type elasticsearch
      @id output_elasticsearch
      host elasticsearch
      port 9200
      scheme http
      user fluentd_user
      password fluentd_password
      logstash_format true
      logstash_prefix logstash
      logstash_dateformat %Y.%m.%d
      flush_interval 5s # shorten the bulk flush interval
      @log_level debug
      id_key _id
      remove_keys _id
    </store>
    <store>
      @type stdout
      @id output_stdout
    </store>
  </match>
</label>

<match **>
  @type stdout
  @id output_stdout_all
</match>

60
efk_cluster/fluentd1.conf
Normal file
@@ -0,0 +1,60 @@
<source>
  @type http
  @id input_http
  port 8888
  @label @main
</source>

<label @main>
  <match sardine.log.**>
    @type copy
    <store>
      @type elasticsearch
      @id output_elasticsearch
      host elasticsearch
      port 9200
      scheme http
      user fluentd_user
      password fluentd_password
      logstash_format true
      logstash_prefix logstash
      logstash_dateformat %Y.%m.%d
      flush_interval 5s
      @log_level debug
      id_key _id
      remove_keys _id
    </store>
    <store>
      @type stdout
      @id output_stdout
    </store>
  </match>

  <match tanya.**>
    @type copy
    <store>
      @type elasticsearch
      @id output_elasticsearch_tanya
      host elasticsearch
      port 9200
      scheme http
      user fluentd_user
      password fluentd_password
      logstash_format false
      index_name logstash_candle_${tag_parts[1]}_${tag_parts[2]}_${tag_parts[3]}
      flush_interval 5s
      @log_level debug
      id_key _id
      remove_keys _id
    </store>
    <store>
      @type stdout
      @id output_stdout_tanya
    </store>
  </match>
</label>

<match **>
  @type stdout
  @id output_stdout_all
</match>
5
efk_cluster/fluentdLogs.sh
Executable file
@@ -0,0 +1,5 @@
#!/bin/bash

FLUENTD_POD=$(microk8s.kubectl get pods -n efk | grep fluentd | awk '{print $1}')
echo "new pod is: " ${FLUENTD_POD}
microk8s.kubectl logs -f pod/${FLUENTD_POD} -n efk
3
efk_cluster/getConfigFlentd.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

microk8s.kubectl describe configMap fluentd-config -n efk
34
efk_cluster/init.sh
Normal file
@@ -0,0 +1,34 @@
cd ../traefik
microk8s.kubectl apply -f traefik_role.yaml
microk8s.kubectl apply -f traefik_role.yaml
microk8s.kubectl apply -f traefik_roleBind.yaml

cd -
sudo su
# prepare the es configuration

microk8s.ctr image import ~/shared/powerssd/images/docker/x86/fluentd-x86-image.tar
microk8s.ctr image import ~/shared/powerssd/images/docker/x86/elasticsearch-8-8-0.tar
microk8s.ctr image import ~/shared/powerssd/images/docker/x86/kibana-8-8-0.tar

# create the es resources
microk8s.kubectl apply -f efk-namespace.yaml
microk8s.kubectl apply -f elasticsearch-pvandpvc_incluster.yaml
microk8s.kubectl apply -f elasticsearch-statefulSet.yaml
microk8s.kubectl apply -f elasticsearch-service.yaml
microk8s.kubectl apply -f elasticsearch-ingress.yaml
sleep 30
./createSecure_passwd_forES.sh
./createFluentdAccoutnIn.sh

microk8s.kubectl apply -f fluentd-configMap.yaml
microk8s.kubectl apply -f fluentd-daemonset.yaml
microk8s.kubectl apply -f fluentd-service.yaml
microk8s.kubectl apply -f fluentd-ingress.yaml
microk8s.kubectl apply -f fluentd-service.yaml

microk8s.kubectl apply -f kibana-deployment.yaml
microk8s.kubectl apply -f kibana-ingress.yaml
microk8s.kubectl apply -f kibana-service.yaml

./refreshTokenForKibana.sh
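The script relies on relative paths (cd ../traefik and the ./*.sh helpers), so it is meant to be launched from inside efk_cluster. Note that the sudo su line opens an interactive root shell, so the commands after it only run once that shell exits; running the whole script as root avoids the pause (a sketch):

cd efk_cluster
sudo bash init.sh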
10
efk_cluster/kibana-configMap.yaml_disabled
Normal file
@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: kibana-config
data:
  kibana.yml: |
    server.host: "0.0.0.0"
    elasticsearch.hosts: ["http://elasticsearch:9200"]
    # Add other configurations here

33
efk_cluster/kibana-deployment.yaml
Normal file
@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: efk
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:8.8.0
          ports:
            - containerPort: 5601
          env:
            - name: ELASTICSEARCH_HOSTS
              value: "http://elasticsearch.efk.svc.cluster.local:9200"
            - name: XPACK_SECURITY_ENCRYPTIONKEY
              value: "a_secure_random_string_of_32_characters"
            - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY
              value: "another_secure_random_string_of_32_characters"
            - name: XPACK_REPORTING_ENCRYPTIONKEY
              value: "yet_another_secure_random_string_of_32_characters"
            - name: ELASTICSEARCH_SERVICEACCOUNTTOKEN
              value: "AAEAAWVsYXN0aWMva2liYW5hL215LXRva2VuOmd3ZG9idU5kVEM2b3BLRUJDS2g5YVE"
33
efk_cluster/kibana-deployment.yaml_tmp
Normal file
@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: efk
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:8.8.0
          ports:
            - containerPort: 5601
          env:
            - name: ELASTICSEARCH_HOSTS
              value: "http://elasticsearch.efk.svc.cluster.local:9200"
            - name: XPACK_SECURITY_ENCRYPTIONKEY
              value: "a_secure_random_string_of_32_characters"
            - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY
              value: "another_secure_random_string_of_32_characters"
            - name: XPACK_REPORTING_ENCRYPTIONKEY
              value: "yet_another_secure_random_string_of_32_characters"
            - name: ELASTICSEARCH_SERVICEACCOUNTTOKEN
              value: "##TOKEN##"
18
efk_cluster/kibana-ingress.yaml
Normal file
@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: efk
spec:
  ingressClassName: traefik
  rules:
    - host: kibana.k8s.xunlang.home
      http:
        paths:
          - backend:
              service:
                name: kibana
                port:
                  number: 5601
            path: /
            pathType: Prefix
25
efk_cluster/kibana-persistent.yaml_disabled
Normal file
@@ -0,0 +1,25 @@
# PersistentVolume configuration
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kibana-config-pv
spec:
  capacity:
    storage: 1Gi # capacity specifies the volume size here
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /var/snap/microk8s/common/mnt/data/kibana # local path
---
# PersistentVolumeClaim configuration
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kibana-config-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi # the requested size is specified via resources here

13
efk_cluster/kibana-service.yaml
Normal file
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: efk
spec:
  ports:
    - port: 5601
      targetPort: 5601
  selector:
    app: kibana
  type: LoadBalancer
4
efk_cluster/myFluentd.Dockerfile
Normal file
@@ -0,0 +1,4 @@
FROM fluent/fluentd-kubernetes-daemonset:v1.16-debian-elasticsearch8-2
USER root
RUN fluent-gem install fluent-plugin-rewrite-tag-filter fluent-plugin-dynamic
USER fluent
21
efk_cluster/refreshTokenForKibana.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash

# get the Pod name of one elasticsearch instance
ES_POD=$(microk8s.kubectl get pods -n efk -l app=elasticsearch -o jsonpath='{.items[0].metadata.name}')
echo "ES_POD:" ${ES_POD}

# exec into the Elasticsearch Pod and generate a service-account token
microk8s.kubectl exec -n efk $ES_POD -- bin/elasticsearch-service-tokens delete elastic/kibana my-token
TOKEN=$(microk8s.kubectl exec -n efk $ES_POD -- bin/elasticsearch-service-tokens create elastic/kibana my-token | grep 'SERVICE_TOKEN' | awk '{print $NF}')

echo "new TOKEN:" ${TOKEN}


microk8s.kubectl delete deployment kibana -n efk
# update the Kibana Deployment YAML file
cd $(pwd)
# substitute the token into the manifest
sed "s/##TOKEN##/$TOKEN/" kibana-deployment.yaml_tmp > kibana-deployment.yaml
# delete and re-apply the Kibana Deployment
microk8s.kubectl apply -f kibana-deployment.yaml

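Typical usage, run from the efk_cluster directory so the sed step can find kibana-deployment.yaml_tmp (a sketch):

./refreshTokenForKibana.sh
# confirm Kibana came back up with the fresh service-account token
microk8s.kubectl get pods -n efk -l app=kibana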
8
efk_cluster/tem.yaml
Normal file
@@ -0,0 +1,8 @@
# filebeat-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: efk
data:
  filebeat.yml: |
13
efk_cluster/updateFluentdConfigMap.sh
Executable file
@@ -0,0 +1,13 @@
#!/bin/bash

microk8s.kubectl delete configMap fluentd-config -n efk
microk8s.kubectl apply -f fluentd-configMap.yaml
microk8s.kubectl delete daemonSet fluentd -n efk
microk8s.kubectl apply -f fluentd-daemonset.yaml
sleep 5
FLUENTD_POD=$(microk8s.kubectl get pods -n efk | grep fluentd | awk '{print $1}')
echo "new pod is: " ${FLUENTD_POD}
microk8s.kubectl logs pod/${FLUENTD_POD} -n efk
sleep 2
exit

28
myrari/Dockerfile
Normal file
@@ -0,0 +1,28 @@
# Use official Node.js LTS image
FROM node:18-alpine

# Set working directory
WORKDIR /app

# Copy package files
COPY package*.json ./

# Install dependencies
RUN npm install

# Copy application files
COPY . .

# Expose the application port
EXPOSE 3000

# Set environment variables
ENV NODE_ENV=production

# Health check endpoint (busybox wget ships with alpine; curl is not installed by default)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s \
    CMD wget -qO- http://localhost:3000/ >/dev/null 2>&1 || exit 1

# Run the application
CMD ["node", "server.js"]

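A build-and-push sketch matching the image reference used in myrari/deployment.yaml (it assumes the MicroK8s registry add-on is enabled, which listens on localhost:32000):

docker build -t localhost:32000/public/myfari:v04 .
docker push localhost:32000/public/myfari:v04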
45
myrari/deployment.yaml
Normal file
@@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myfari-app
  namespace: farui
  labels:
    app: myfari
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myfari
  template:
    metadata:
      labels:
        app: myfari
    spec:
      containers:
        - name: myfari
          image: localhost:32000/public/myfari:v04
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: "production"
          resources:
            requests:
              cpu: "100m"
              memory: "128Mi"
            limits:
              cpu: "500m"
              memory: "512Mi"
          livenessProbe:
            httpGet:
              path: /
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5

19
myrari/ingress.yaml
Normal file
@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myfari-ingress
  namespace: farui
spec:
  ingressClassName: traefik
  rules:
    - host: myfari.k8s.xunlang.home
      http:
        paths:
          - backend:
              service:
                name: myfari-service
                port:
                  number: 80
            path: /
            pathType: Prefix

16
myrari/service.yaml
Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: myfari-service
  namespace: farui
  labels:
    app: myfari
spec:
  selector:
    app: myfari
  ports:
    - protocol: TCP
      port: 80
      targetPort: 3000
  type: ClusterIP

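Once the farui namespace, Deployment, Service and Ingress are applied, an end-to-end check might look like this (a sketch; <ingress-ip> is a placeholder for the Traefik entrypoint address, or let DNS resolve myfari.k8s.xunlang.home directly):

microk8s.kubectl get pods -n farui -l app=myfari
curl -H "Host: myfari.k8s.xunlang.home" http://<ingress-ip>/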