System initialization

# Open firewall ports
systemctl start firewalld
systemctl enable firewalld
firewall-cmd --permanent --add-port=5044/tcp
firewall-cmd --permanent --add-port=5601/tcp
firewall-cmd --permanent --add-port=9300/tcp
sudo firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.30.133" port protocol="tcp" port="9200" accept'
sudo firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.30.134" port protocol="tcp" port="9200" accept'
sudo firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.30.135" port protocol="tcp" port="9200" accept'
firewall-cmd --reload
# Disable SELinux (the sed takes effect after reboot; setenforce 0 applies it to the current session)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
# Add hosts entries
cat >> /etc/hosts << EOF
192.168.30.133 elk1
192.168.30.134 elk2
192.168.30.135 elk3
EOF
# Create the elk user and group
groupadd elk
useradd elk -g elk
# Create the data and log directories and grant ownership
mkdir -pv /data/elk/{data,logs}
chown -R elk:elk /data/elk/

# Raise the per-process open-file limit
# Takes effect after re-login (or reboot)
vi /etc/security/limits.conf
* hard nofile 65535
* soft nofile 65535
# Verify
ulimit -n

# Raise the maximum number of virtual memory areas per process
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p

Elasticsearch cluster

cd /opt/
# Extract into the current directory (the archive unpacks to elasticsearch-<version>)
tar -xf elasticsearch-7.9.3-linux-x86_64.tar.gz -C .
mv elasticsearch-7.9.3 elasticsearch

# Grant ownership
chown -R elk:elk elasticsearch

Cluster configuration

vim elasticsearch/config/elasticsearch.yml

# Must be changed on every node
node.name: elk1
# Common settings (identical on every node)
path.data: /data/elk/data
path.logs: /data/elk/logs
bootstrap.memory_lock: false
http.port: 9200
cluster.name: elk-cluster
network.host: 0.0.0.0
discovery.seed_hosts: ["192.168.30.133", "192.168.30.134", "192.168.30.135"]
cluster.initial_master_nodes: ["elk1"]

# ingest.geoip.downloader.enabled is a 7.14+ setting; leave it commented out on 7.9.3
#ingest.geoip.downloader.enabled: false

xpack.security.enabled: true
xpack.security.transport.ssl:
  enabled: true
  verification_mode: certificate
  keystore.path: certs/elastic-certificates.p12
  truststore.path: certs/elastic-certificates.p12
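
Only node.name needs to differ between the three nodes; the rest of the file can be copied verbatim. A minimal sketch of the per-node override, assuming the hostnames from /etc/hosts above:

# elk2 (192.168.30.134)
node.name: elk2

# elk3 (192.168.30.135)
node.name: elk3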

Generate the certificates

cd elasticsearch
bin/elasticsearch-certutil ca
bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 
mkdir config/certs
mv *.p12 config/certs
chmod -R 755 config/certs
chown -R elk:elk config/certs

# Copy the certs directory to every node
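# A hedged example, assuming SSH access and the same install path on elk2 and elk3:
scp -r config/certs elk2:/opt/elasticsearch/config/
scp -r config/certs elk3:/opt/elasticsearch/config/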

jvm.options settings

# Set the heap to roughly half of the machine's RAM; keep Xms and Xmx equal
vim /opt/elasticsearch/config/jvm.options
-Xms1g
-Xmx1g

Start Elasticsearch

cd /opt/elasticsearch

# Startup script (run it as root: Elasticsearch refuses to start as root,
# so the script drops to the elk user)
vim start.sh

su - elk -c "/opt/elasticsearch/bin/elasticsearch -d"

# Start
sh start.sh

# Verify: list the cluster nodes (with security enabled, pass -u elastic and the password set in the next step)
curl -XGET 'http://127.0.0.1:9200/_cat/nodes?pretty'

Set user passwords

bin/elasticsearch-setup-passwords interactive
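
Once the passwords are set, authenticated requests should work against the cluster. A quick sanity check, assuming the elastic password you just chose is substituted for xxx:

curl -u elastic:xxx 'http://127.0.0.1:9200/_cluster/health?pretty'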

Logstash

# Install
cd /opt
tar zxvf logstash-7.9.3.tar.gz
mv logstash-7.9.3 logstash

# Edit the configuration
vim logstash/config/logstash-sample.conf

input {
  beats {
    port => 5044
  }
  file {
    path => "/usr/local/nginx/logs/access.log"
    start_position => "beginning"
  }
}

filter {
  if [path] =~ "access" {
    mutate { replace => { "type" => "apache_access" } }
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
  }
  date {
    match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
  }
}

output {
  elasticsearch {
    hosts => ["10.0.0.11:9200","10.0.0.12:9200","10.0.0.13:9200"]
    user => "elastic"
    password => "xxx"
  }
  stdout { codec => rubydebug }
}
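# Optional: validate the pipeline syntax before starting (uses Logstash's standard
# --config.test_and_exit flag; the path assumes the config edited above)
/opt/logstash/bin/logstash -f /opt/logstash/config/logstash-sample.conf --config.test_and_exit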

# Startup script
vim /opt/logstash/start.sh

nohup /opt/logstash/bin/logstash -f /opt/logstash/config/logstash-sample.conf > /dev/null 2> /dev/null &

# Start
sh /opt/logstash/start.sh

Kibana

# Install
cd /opt
tar zxvf kibana-7.9.3-linux-x86_64.tar.gz 
mv kibana-7.9.3-linux-x86_64 kibana

# Edit the configuration
vim kibana/config/kibana.yml

server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://192.168.30.133:9200","http://192.168.30.134:9200","http://192.168.30.135:9200"]
i18n.locale: "zh-CN"
elasticsearch.username: "kibana_system"
elasticsearch.password: "xxx"

# Startup script
cd /opt/kibana

vim start.sh

nohup /opt/kibana/bin/kibana --allow-root &

# Start; log in with the elastic user and the password set earlier
sh start.sh
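
Kibana listens on port 5601 once it is up. A quick check from the server itself (a hedged example; with security enabled the status API may ask for the elastic credentials):

curl -u elastic:xxx http://127.0.0.1:5601/api/status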

Filebeat

Create a user

  1. In Kibana, create a role beats_write with the cluster privileges monitor and manage_index_templates, and the index (*) privileges create_index, delete, monitor, and write
  2. Create a user beats_to_es and assign it the beats_write role (the same can be scripted with the security API, see the sketch after this list)
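
If you would rather script this than click through Kibana, the same role and user can be created with the Elasticsearch security API. A sketch, assuming the elastic password is substituted for xxx and the role/user names above are kept:

curl -u elastic:xxx -X POST 'http://192.168.30.133:9200/_security/role/beats_write' -H 'Content-Type: application/json' -d'
{
  "cluster": ["monitor", "manage_index_templates"],
  "indices": [
    { "names": ["*"], "privileges": ["create_index", "delete", "monitor", "write"] }
  ]
}'

curl -u elastic:xxx -X POST 'http://192.168.30.133:9200/_security/user/beats_to_es' -H 'Content-Type: application/json' -d'
{
  "password": "xxx",
  "roles": ["beats_write"]
}'
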
# Install
cd /opt
tar xzvf filebeat-7.13.2-linux-x86_64.tar.gz
mv filebeat-7.13.2-linux-x86_64 filebeat

# Edit the configuration
vim filebeat/filebeat.yml

filebeat.inputs:
- type: log
  paths:
    - /var/log/nginx/*.log
  tags: ["nginx"]

output.logstash:
  hosts: ["192.168.30.136:5044","192.168.30.137:5044"]

# username/password only apply when shipping straight to Elasticsearch with the
# beats_to_es user created above; in that case comment out output.logstash:
#output.elasticsearch:
#  hosts: ["192.168.30.133:9200","192.168.30.134:9200","192.168.30.135:9200"]
#  username: "beats_to_es"
#  password: "xxx"

# Check the configuration
filebeat/filebeat test config -c filebeat/filebeat.yml

# Startup script
vim /opt/filebeat/start.sh

nohup /opt/filebeat/filebeat -e -c /opt/filebeat/filebeat.yml >/dev/null 2>&1 &

# Start
sh /opt/filebeat/start.sh
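
To confirm events are flowing end to end, list the indices on the cluster and look for the index Logstash created (a hedged check using the standard _cat API; substitute the elastic password):

curl -u elastic:xxx 'http://192.168.30.133:9200/_cat/indices?v'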

Common issues

Limit of total fields [1000]

Update the setting on all existing indices

curl -XPUT --user elastic:xxx -H 'Content-Type: application/json' 'http://localhost:9200/_all/_settings?preserve_existing=true' -d '{
  "index.mapping.total_fields.limit" : "3000"
}'

Create an index template so the defaults apply to future indices

curl -XPUT --user elastic:xxx 'http://localhost:9200/_template/default_template' -H 'Content-Type: application/json' -d '{
  "index_patterns": ["*"],
  "settings": {
    "index.mapping.total_fields.limit": "3000"
  },
  "mappings": {
    "properties": {
      "data": {
        "properties": {
          "param": {
            "type": "object"
          }
        }
      }
    }
  }
}'
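
To confirm the template was stored, it can be read back (substitute the elastic password, since security is enabled):

curl --user elastic:xxx 'http://localhost:9200/_template/default_template?pretty'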

Rename an index

curl -X POST --user elastic:xxx "localhost:9200/_reindex" -H 'Content-Type: application/json' -d'
{
  "source": {
    "index": "index-name"
  },
  "dest": {
    "index": "index-name-bak"
  }
}
'
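# Before deleting the source index, compare document counts between the two
# (a hedged check; _cat/count is a standard API, substitute the elastic password):
curl --user elastic:xxx "localhost:9200/_cat/count/index-name?v"
curl --user elastic:xxx "localhost:9200/_cat/count/index-name-bak?v"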

curl -X DELETE --user elastic:xxx "localhost:9200/index-name"