Software notes:
All packages can be downloaded from: Past Releases of Elastic Stack Software | Elastic
Open the page and select the component and version you need.
The packages used here are: elasticsearch-7.1.1-x86_64.rpm, kibana-7.1.1-x86_64.rpm, logstash-7.1.1.rpm, filebeat-7.1.1-x86_64.rpm.
Architecture topology
Cluster mode (topology diagram)
Standalone mode (topology diagram)
Environment preparation
Disable the firewall
# systemctl disable --now firewalld
Disable SELinux
# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# setenforce 0
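A quick way to confirm both changes took effect (optional check; assumes firewalld and SELinux were active before):
# firewall-cmd --state    # expected: not running
# getenforce              # expected: Permissive (Disabled after a reboot)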
Time synchronization (check each host's clock; if the time is off, sync every host)
# vim /etc/chrony.conf
server ntp.aliyun.com iburst    # add this line
# timedatectl set-timezone Asia/Shanghai
# systemctl restart chronyd.service
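To verify that chrony is actually syncing against the new server (optional check):
# chronyc sources -v    # ntp.aliyun.com should be listed and marked with '*' once selected
# timedatectl           # confirms the Asia/Shanghai timezone and NTP sync status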
Check whether Java is installed
It can be installed with yum.
Install Java on the kibana, logstash, and elasticsearch hosts (the filebeat host does not need it).
Note: logstash and elasticsearch require Java.
# java -version    # yum on CentOS 7 installs Java 8 by default
# yum install java
Flow diagram
Deployment:
kibana
Host IP: 192.168.158.139
[root@kibana ~]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.158.139 kibana
192.168.158.140 es
192.168.158.141 logstash
[root@kibana ~]# ls
anaconda-ks.cfg ceph-release-1-1.el7.noarch.rpm kibana-7.1.1-x86_64.rpm
[root@kibana ~]# rpm -ivh kibana-7.1.1-x86_64.rpm
es
Host: 192.168.158.140
yum install java
[root@es ~]# ls
anaconda-ks.cfg ceph-release-1-1.el7.noarch.rpm elasticsearch-7.1.1-x86_64.rpm
[root@es ~]# rpm -ivh elasticsearch-7.1.1-x86_64.rpm
logstash
Host: 192.168.158.141
yum install java
[root@logstash ~]# ls
anaconda-ks.cfg ceph-release-1-1.el7.noarch.rpm logstash-7.1.1.rpm
[root@logstash ~]# rpm -ivh logstash-7.1.1.rpm
filebeat
Host: 192.168.158.142
[root@web1 ~]# ls
apache-tomcat-8.5.40.tar.gz filebeat-7.1.1-x86_64.rpm
[root@web1 ~]# rpm -ivh filebeat-7.1.1-x86_64.rpm
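The pipeline built later also collects nginx access and error logs from this host, so nginx must be installed and running here; a minimal sketch, assuming nginx is taken from the EPEL repository:
[root@web1 ~]# yum install -y epel-release
[root@web1 ~]# yum install -y nginx
[root@web1 ~]# systemctl enable --now nginx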
es
Check that it can start
# systemctl start elasticsearch.service
# check that it started normally
# systemctl status elasticsearch.service
It should show as active (running).
# systemctl stop elasticsearch.service
Stop it for now, since its configuration file has not been modified yet.
logstash
Set up the logstash command
[root@logstash ~]# ln -s /usr/share/logstash/bin/logstash /usr/local/bin/
Then test that the command works; a quick smoke test follows.
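A minimal test is to run an inline stdin-to-stdout pipeline with the -e flag: type a line, and it should be printed back as a structured event (Ctrl+C to exit):
[root@logstash ~]# logstash -e 'input { stdin {} } output { stdout {} }'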
es
vim /etc/elasticsearch/jvm.options
jvm.options (adjusts the Elasticsearch JVM memory settings; the relevant lines are shown below)
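The heap settings look like this; 1 GB is the 7.1.1 default (the same values appear in the ps output later), and -Xms/-Xmx should be kept equal, sized to roughly half the host's memory:
-Xms1g    # initial heap size
-Xmx1g    # maximum heap size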
Modify the ES configuration file
[root@es ~]# vim /etc/elasticsearch/elasticsearch.yml
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
cluster.name: my-application    # cluster name
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: es    # node name (this host's name; change to match your host)
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch    # data directory
#
# Path to log files:
#
path.logs: /var/log/elasticsearch    # log directory
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: 192.168.158.140    # this ES host's IP (change to match your host)
#
# Set a custom port for HTTP:
#
http.port: 9200    # HTTP port
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
cluster.initial_master_nodes: ["es"]    # this node's name (change to match node.name)
#
# For more information, consult the discovery and cluster formation module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
Start ES
systemctl start elasticsearch.service
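To confirm ES is answering on the configured address (quick check; the IP matches network.host above):
# curl http://192.168.158.140:9200/                 # should return the cluster name, node name, and version
# curl http://192.168.158.140:9200/_cat/health?v     # cluster health overview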
kibana
Modify the Kibana configuration file (to make browsing indices easier)
[root@kibana ~]# vim /etc/kibana/kibana.yml
server.port: 5601    # Kibana's listening port
server.host: "192.168.158.139"    # address Kibana listens on (this host)
server.name: "kibana"    # this host's name
elasticsearch.hosts: ["http://192.168.158.140:9200"]    # IP and port of the ES host
i18n.locale: "zh-CN"    # optional: switches the web UI to Chinese
Start Kibana
systemctl start kibana.service
Check that it is running
[root@kibana ~]# ps aux | grep node
kibana 1405 0.3 5.7 1336880 232648 ? Ssl 10:44 2:15 /usr/share/kibana/bin/../node/bin/node --no-warnings --max-http-header-size=65536 /usr/share/kibana/bin/../src/cli -c /etc/kibana/kibana.yml
root 3517 0.0 0.0 112824 988 pts/0 S+ 21:35 0:00 grep --color=auto node
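Alternatively, confirm Kibana is listening on its port (assuming port 5601 as configured above):
[root@kibana ~]# ss -lntp | grep 5601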
es
Check that ES is running
[root@es ~]# ps aux | grep java
elastic+ 1414 0.7 38.2 4941372 1538396 ? Ssl 10:45 5:11 /usr/share/elasticsearch/jdk/bin/java -Xms1g -Xmx1g -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -Des.networkaddress.cache.ttl=60 -Des.networkaddress.cache.negative.ttl=10 -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.io.tmpdir=/tmp/elasticsearch-2879596055492145001 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/lib/elasticsearch -XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log -Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m -Djava.locale.providers=COMPAT -Dio.netty.allocator.type=unpooled -Des.path.home=/usr/share/elasticsearch -Des.path.conf=/etc/elasticsearch -Des.distribution.flavor=default -Des.distribution.type=rpm -Des.bundled_jdk=true -cp /usr/share/elasticsearch/lib/* org.elasticsearch.bootstrap.Elasticsearch -p /var/run/elasticsearch/elasticsearch.pid --quiet
root 3805 0.0 0.0 112828 984 pts/0 S+ 21:36 0:00 grep --color=auto java
logstash
vim /etc/logstash/jvm.options
(adjusts the Logstash JVM memory settings, same idea as on the ES host)
Modify the Logstash configuration file
[root@logstash ~]# cd /etc/logstash/conf.d/
[root@logstash conf.d]# vim pipline.conf
Using the filter plugin
Filebeat is configured with the IP and port of the Logstash server and ships the logs it collects to that address as Logstash input. Logstash then parses and filters the events and outputs them to the ES server, which stores them as distributed indices. Kibana can then visualize, search, and analyze the data.
input {
  file {
    path => "/var/log/messages"
    start_position => "beginning"
  }
  beats {
    port => 5044
  }
}

filter {
  if [host][name] {
    mutate { add_field => { "hostname" => "%{[host][name]}" } }
  }
  else if [agent][hostname] {
    mutate { add_field => { "hostname" => "%{[agent][hostname]}" } }
  }
  else {
    mutate { add_field => { "hostname" => "%{host}" } }
  }
}

output {
  if [hostname] == "logstash" {
    elasticsearch {
      hosts => ["192.168.158.140:9200"]
      index => "system-log-%{+YYYY.MM.dd}"
    }
  }
  else if [hostname] == "web1" {
    elasticsearch {
      hosts => ["192.168.158.140:9200"]
      index => "web1-log-%{+YYYY.MM.dd}"
    }
  }
  stdout {
    codec => rubydebug
  }
}
Collecting different logs from different hosts and their applications
input {
  file {
    path => "/var/log/messages"
    start_position => "beginning"
  }
  beats {
    port => 5044
  }
}

filter {
  if [host][name] {
    mutate { add_field => { "hostname" => "%{[host][name]}" } }
  }
  else if [agent][hostname] {
    mutate { add_field => { "hostname" => "%{[agent][hostname]}" } }
  }
  else {
    mutate { add_field => { "hostname" => "%{host}" } }
  }
}

output {
  if [hostname] == "logstash" {
    elasticsearch {
      hosts => ["192.168.158.140:9200"]
      index => "system-log-%{+YYYY.MM.dd}"
    }
  }
  else if [hostname] == "web1" {
    if "system" in [tags] {
      elasticsearch {
        hosts => ["192.168.158.140:9200"]
        index => "web1-log-%{+YYYY.MM.dd}"
      }
    }
    if "nginx-access" in [tags] {
      elasticsearch {
        hosts => ["192.168.158.140:9200"]
        index => "web1-nginx-access-log-%{+YYYY.MM.dd}"
      }
    }
    if "nginx-error" in [tags] {
      elasticsearch {
        hosts => ["192.168.158.140:9200"]
        index => "web1-nginx-error-log-%{+YYYY.MM.dd}"
      }
    }
  }
  stdout {
    codec => rubydebug
  }
}
filebeat
Modify the configuration file
[root@web1 ~]# cd /etc/filebeat/
[root@web1 filebeat]# vim filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
  tags: ["system"]

- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  tags: ["nginx-access"]

- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["nginx-error"]
filebeat.inputs:                  # input sources
- type: log                       # input type
  enabled: true                   # enable this input
  paths:                          # log file path(s)
    - /var/log/nginx/access.log
  tags: ["nginx-access"]          # tag attached to each event (used for routing in Logstash)
Change the output to the Logstash server's IP and port (the port here is the one Logstash listens on for beats input, 5044 in the pipeline above); a sketch follows.
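In the same filebeat.yml, comment out the default Elasticsearch output and point the Logstash output at the Logstash host; a sketch matching the addresses used in this setup:
#output.elasticsearch:
#  hosts: ["localhost:9200"]
output.logstash:
  hosts: ["192.168.158.141:5044"]    # Logstash host IP, beats input port from pipline.conf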
Start filebeat
systemctl start filebeat.service
Note: an index is only created once log data is actually generated; see the check below.
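Once Logstash is running (next step), a quick way to force some log data and confirm the indices appear (the curl target is the ES host):
[root@web1 ~]# logger "filebeat index test"              # appends a line to /var/log/messages
# curl http://192.168.158.140:9200/_cat/indices?v         # the web1-* (and system-log-*) indices should show up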
logstash
Start the Logstash service
[root@logstash ~]# logstash -f /etc/logstash/conf.d/pipline.conf
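If Logstash fails to start, or after editing the pipeline, the file can be syntax-checked first with Logstash's built-in test flag (it validates the config and exits):
[root@logstash ~]# logstash -f /etc/logstash/conf.d/pipline.conf --config.test_and_exit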
Open the Kibana web UI (http://192.168.158.139:5601) to view the indices.