Linux Operations Guide: ELK Log Analysis System


Software        Purpose                                           Reference
Elasticsearch   Search engine / database; stores the data         Official documentation
Logstash        Collects logs and filters the data                Official documentation
Kibana          Analysis, filtering, and visualization            Official documentation
Filebeat        Collects logs and ships them to ES or Logstash    Official documentation

I. Deploying Elasticsearch

1. Install Elasticsearch

[root@node-1 ~]# yum -y install java
[root@node-1 ~]# mkdir -p /data/soft
[root@node-1 ~]# cd /data/soft/
[root@node-1 soft]# wget https://mirrors.tuna.tsinghua.edu.cn/elasticstack/6.x/yum/6.6.0/elasticsearch-6.6.0.rpm
[root@node-1 soft]# wget https://mirrors.tuna.tsinghua.edu.cn/elasticstack/6.x/yum/6.6.0/filebeat-6.6.0-x86_64.rpm
[root@node-1 soft]# wget https://mirrors.tuna.tsinghua.edu.cn/elasticstack/6.x/yum/6.6.0/kibana-6.6.0-x86_64.rpm
[root@node-1 soft]# wget https://mirrors.tuna.tsinghua.edu.cn/elasticstack/6.x/yum/6.6.0/logstash-6.6.0.rpm
[root@node-1 ~]# yum -y install elasticsearch-6.6.0.rpm 
[root@node-1 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: node-1             // this node's name within the cluster
path.data: /data/elasticsearch             // data directory
path.logs: /var/log/elasticsearch             // log directory
bootstrap.memory_lock: true             // lock the JVM heap in memory
network.host: 192.168.1.10,127.0.0.1             // IP addresses to listen on
http.port: 9200             // HTTP port
[root@node-1 ~]# mkdir -p /data/elasticsearch              // create the data directory
[root@node-1 ~]# chown -R elasticsearch.elasticsearch /data/elasticsearch/             // fix ownership
[root@node-1 ~]# vim /etc/elasticsearch/jvm.options             // set the heap size to be locked
-Xms1g             // minimum heap size
-Xmx1g             // maximum heap size; the official recommendation is half of physical RAM, capped at 32 GB
[root@node-1 ~]# systemctl edit elasticsearch             // with memory locking enabled the service fails to start until the limit below is raised
[Service]
LimitMEMLOCK=infinity             // save and exit
[root@node-1 ~]# systemctl daemon-reload
[root@node-1 ~]# systemctl restart elasticsearch
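
A quick sanity check: confirm the node answers on port 9200 and that bootstrap.memory_lock actually took effect.

[root@node-1 ~]# curl -s 'http://192.168.1.10:9200/?pretty'             // node banner with cluster name and version
[root@node-1 ~]# curl -s 'http://192.168.1.10:9200/_nodes?filter_path=**.mlockall&pretty'             // should report "mlockall" : true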

2. Join nodes into an Elasticsearch cluster

[root@node-2 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: node-2
path.data: /data/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
network.host: 192.168.1.20,127.0.0.1
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.1.10", "192.168.1.20"]             // master and data nodes
discovery.zen.minimum_master_nodes: 2             // set to (number of master-eligible nodes / 2) + 1

3. Common cluster management and monitoring commands

[root@node-1 ~]# curl -XPUT '192.168.1.10:9200/vipinfo/users/1?pretty&pretty' -H 'Content-Type: application/json' -d '{"name": "guofucheng","age": "45","job": "mingxing"}'             //create an index and insert a document
[root@node-1 ~]# curl -XGET '192.168.1.10:9200/_cat/indices?pretty'             //list index information
[root@node-1 ~]# curl -XGET '192.168.1.10:9200/_cluster/health?pretty'             //check cluster health
[root@node-1 ~]# curl -XGET '192.168.1.10:9200/_cat/nodes?human&pretty'             //list cluster nodes
[root@node-1 ~]# curl -XGET '192.168.1.10:9200/_nodes/_all/info/jvm.process?human&pretty'             //detailed JVM/process info for every node
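
To read back the document created above:

[root@node-1 ~]# curl -XGET '192.168.1.10:9200/vipinfo/users/1?pretty'             //fetch the document by ID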

II. Installing the Elasticsearch-head Plugin

1. Local installation

[root@node-1 ~]# git clone https://github.com/mobz/elasticsearch-head.git
[root@node-1 ~]# yum -y install epel-release
[root@node-1 ~]# yum -y install nodejs npm
[root@node-1 ~]# cd elasticsearch-head/
[root@node-1 elasticsearch-head]# npm install             // or cnpm install if the Taobao mirror client is installed
[root@node-1 elasticsearch-head]# cd _site/
[root@node-1 _site]# vim app.js 
# original line: this.base_uri = this.config.base_uri;
this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://192.168.1.10:9200";
[root@node-1 ~]# vim /etc/elasticsearch/elasticsearch.yml 
http.cors.enabled: true             // allow cross-origin requests from the head UI
http.cors.allow-origin: "*"
[root@node-1 ~]# systemctl restart elasticsearch             // apply the CORS settings
[root@node-1 ~]# cd elasticsearch-head/
[root@node-1 elasticsearch-head]# node_modules/grunt/bin/grunt server &

2. Browser extension installation

Download the es-head extension from https://github.com/mobz/elasticsearch-head
After downloading, extract the archive and copy es-head.crx from the crx directory to the desktop.
Rename es-head.crx to es-head.crx.zip.
Extract es-head.crx.zip into an es-head.crx directory, then load that directory in Chrome under Developer Tools > Extensions.

III. Building an ES + Kibana + Filebeat Stack (small scale)

1. Install Kibana

[root@node-1 ~]# cd /data/soft
[root@node-1 soft]# rpm -ivh kibana-6.6.0-x86_64.rpm 
[root@node-1 ~]# vim /etc/kibana/kibana.yml
server.port: 5601
server.host: "192.168.1.10"
server.name: "node-1"             // hostname of the machine Kibana runs on
elasticsearch.hosts: ["http://192.168.1.10:9200"]             // Elasticsearch endpoint Kibana reads log data from
[root@node-1 ~]# systemctl start kibana
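
Kibana can take a minute to come up; verify the listener and the status API:

[root@node-1 ~]# ss -lntp | grep 5601             // confirm Kibana is listening
[root@node-1 ~]# curl -s http://192.168.1.10:5601/api/status | head -c 200             // returns JSON once Kibana is ready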

2. Install Filebeat

[root@node-1 ~]# cd /data/soft
[root@node-1 soft]# rpm -ivh filebeat-6.6.0-x86_64.rpm 
[root@node-1 soft]# vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
    - /var/log/*.log

output.elasticsearch:
  hosts: ["192.168.1.10:9200"]
  username: "elastic"             // credentials are only needed when X-Pack security is enabled
  password: "admin"
setup.kibana:
  host: "192.168.1.10:5601"
[root@node-1 soft]# systemctl start filebeat
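
With this default configuration Filebeat writes to a filebeat-6.6.0-* index, which should appear shortly:

[root@node-1 soft]# curl -s '192.168.1.10:9200/_cat/indices?v' | grep filebeat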

3. Collect nginx logs

[root@node-1 ~]# yum -y install epel-release
[root@node-1 ~]# yum -y install nginx httpd-tools             // install nginx and httpd-tools (provides ab)
[root@node-1 ~]# systemctl start nginx
[root@node-1 ~]# vim /etc/nginx/nginx.conf
http {             // add the directives below inside the existing http { } block
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    log_format log_json '{ "@timestamp": "$time_local", '
    '"remote_addr": "$remote_addr", '
    '"referer": "$http_referer", '
    '"request": "$request", '
    '"status": $status, '
    '"bytes": $body_bytes_sent, '
    '"agent": "$http_user_agent", '
    '"x_forwarded": "$http_x_forwarded_for", '
    '"up_addr": "$upstream_addr",'
    '"up_host": "$upstream_http_host",'
    '"up_resp_time": "$upstream_response_time",'
    '"request_time": "$request_time"'
    ' }';
    access_log  /var/log/nginx/access.log  log_json;
[root@node-1 ~]# nginx -t && systemctl reload nginx             // apply the new JSON log format
[root@node-1 ~]# vim /etc/filebeat/filebeat.yml             // split access.log and error.log into separate indices
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]

- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["error"]
output.elasticsearch:
  hosts: ["192.168.1.10:9200"]
  indices:
    - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "access"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "error"

setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
[root@node-1 ~]# ab -n 100 -c 20 http://192.168.1.10/             // generate traffic with the ab benchmarking tool
[root@node-1 ~]# tail -1 /var/log/nginx/access.log 
{ "@timestamp": "05/Aug/2020:21:18:15 +0800", "remote_addr": "192.168.1.10", "referer": "-", "request": "GET / HTTP/1.0", "status": 200, "bytes": 4833, "agent": "ApacheBench/2.3", "x_forwarded": "-", "up_addr": "-","up_host": "-","up_resp_time": "-","request_time": "0.000" }

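To confirm each line really is valid JSON (assuming jq is installed, e.g. via yum -y install jq from EPEL):

[root@node-1 ~]# tail -1 /var/log/nginx/access.log | jq .             // pretty-prints on success, errors on malformed JSON
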
4. Collect Tomcat logs

[root@node01 ~]# vim /etc/tomcat/server.xml
		<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
               prefix="localhost_access_log." suffix=".txt"
			   pattern="{&quot;clientip&quot;:&quot;%h&quot;,&quot;ClientUser&quot;:&quot;%l&quot;,&quot;authenticated&quot;:&quot;%u&quot;,&quot;AccessTime&quot;:&quot;%t&quot;,&quot;method&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s&quot;,&quot;SendBytes&quot;:&quot;%b&quot;,&quot;Query?string&quot;:&quot;%q&quot;,&quot;partner&quot;:&quot;%{Referer}i&quot;,&quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}"/>
[root@node01 ~]# systemctl restart tomcat             // apply the new access-log pattern
[root@node01 ~]# vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/tomcat/localhost_access_log.*.txt
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["tomcat"]    
  
output.elasticsearch:
  hosts: ["192.168.1.10:9200"]
  index: "tomcat_access-%{[beat.version]}-%{+yyyy.MM}"

setup.template.name: "tomcat"
setup.template.pattern: "tomcat_*"
setup.template.enabled: false
setup.template.overwrite: true
[root@node01 ~]# systemctl restart filebeat

5. Collect Java logs with multiline matching

[root@node01 ~]# vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/elasticsearch/elasticsearch.log
  multiline.pattern: '^\['             // a line starting with "[" begins a new event
  multiline.negate: true             // lines that do NOT match the pattern...
  multiline.match: after             // ...are appended to the preceding matching line

output.elasticsearch:
  hosts: ["192.168.1.10:9200"]
  index: "es-%{[beat.version]}-%{+yyyy.MM}"
setup.template.name: "es"
setup.template.pattern: "es-*"
setup.template.enabled: false
setup.template.overwrite: true
[root@node01 ~]# systemctl restart filebeat
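
Filebeat can validate the configuration at any time:

[root@node01 ~]# filebeat test config -c /etc/filebeat/filebeat.yml             // prints "Config OK" on success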

6. Collect Docker logs

[root@node01 ~]# mkdir /opt/{nginx,mysql}       //directories for mounting container logs (for reference only)
[root@node01 ~]# vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log 
  enabled: true
  paths:
    - /var/lib/docker/containers/*/*-json.log
  json.keys_under_root: true
  json.overwrite_keys: true

output.elasticsearch:
  hosts: ["192.168.1.10:9200"]
  indices:
    - index: "docker-nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stdout"
        attrs.service: "nginx"
    - index: "docker-nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stderr"
        attrs.service: "nginx"
    - index: "docker-db-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stdout"
        attrs.service: "db"
    - index: "docker-db-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        stream: "stderr"
        attrs.service: "db"

setup.template.name: "docker"
setup.template.pattern: "docker-*"
setup.template.enabled: false
setup.template.overwrite: true
[root@node01 ~]# systemctl restart filebeat
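
The stream and attrs.service fields used in the conditions above come from Docker's default json-file logging driver: stream is stdout or stderr, while attrs only appears when a container is started with the labels log option. A sketch of how the two containers might be launched (container names and images are illustrative, not from the original steps):

[root@node01 ~]# docker run -d --name nginx --label service=nginx --log-opt labels=service nginx             // log lines gain "attrs":{"service":"nginx"}
[root@node01 ~]# docker run -d --name db --label service=db --log-opt labels=service -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7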

7. Collect syslog logs

Let's generate an SSL certificate to secure log transport from the Rsyslog and Filebeat clients to the Logstash server.

[root@node01 ~]# mkdir -p /etc/logstash/ssl
[root@node01 ~]# cd /etc/logstash/
[root@node01 logstash]# openssl req -subj '/CN=elk-master/' -x509 -days 3650 -batch -nodes -newkey rsa:2048 -keyout ssl/logstash-forwarder.key -out ssl/logstash-forwarder.crt

Create three Logstash configuration files: 'filebeat-input.conf' for the Filebeat input, 'syslog-filter.conf' for syslog processing, and 'output-elasticsearch.conf' to define the Elasticsearch output.

[root@node01 ~]# cd /etc/logstash/
[root@node01 ~]# vim conf.d/filebeat-input.conf
input {
  beats {
    port => 5443
    type => syslog
    ssl => true
    ssl_certificate => "/etc/logstash/ssl/logstash-forwarder.crt"
    ssl_key => "/etc/logstash/ssl/logstash-forwarder.key"
  }
}

For syslog processing we will use the 'grok' filter plugin. Create a new configuration file, 'syslog-filter.conf', in the same directory.

[root@node01 ~]# vim conf.d/syslog-filter.conf
filter {
  if [type] == "syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }
}

Finally, create 'output-elasticsearch.conf' for the Elasticsearch output.

[root@node01 ~]# vim conf.d/output-elasticsearch.conf
output {
  elasticsearch {
    hosts => ["localhost:9200"]
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
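
Before starting the service, the whole conf.d directory can be syntax-checked:

[root@node01 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/ --config.test_and_exit             // prints "Configuration OK" on success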

Copy the Logstash certificate file logstash-forwarder.crt to the /etc/filebeat directory, then restart Filebeat:

[root@node01 ~]# cp /etc/logstash/ssl/logstash-forwarder.crt /etc/filebeat/
[root@node01 ~]# systemctl restart filebeat
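
For this pipeline Filebeat must ship to the Logstash beats input rather than straight to Elasticsearch; a minimal sketch of the relevant filebeat.yml section (the Logstash host IP is an assumption):

output.logstash:
  hosts: ["192.168.1.10:5443"]             // the beats input port configured in filebeat-input.conf
  ssl.certificate_authorities: ["/etc/filebeat/logstash-forwarder.crt"]             // trust the self-signed certificate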

Next, create a logstash.conf file in /etc/logstash/conf.d for the rsyslog UDP input:

[root@node01 ~]# cd /etc/logstash/conf.d
[root@node01 ~]# vim logstash.conf
input {
  udp {
    host => "127.0.0.1"
    port => 10514
    codec => "json"
    type => "rsyslog"
  }
}

# The filter pipeline stays empty here; no formatting is done.
filter {
}

                                                                                     
# Every single log will be forwarded to Elasticsearch. If you are using another port, specify it here.
output {
  if [type] == "rsyslog" {
    elasticsearch {
      hosts => [ "127.0.0.1:9200" ]
    }
  }
}

Rsyslog uses a template to convert log lines to JSON before forwarding them to Logstash.

[root@node01 ~]# cd /etc/rsyslog.d
[root@node01 ~]# vim 70-output.conf
# This line sends all lines to the defined IP address at port 10514
# using the json-template format.

*.*         @127.0.0.1:10514;json-template
[root@node01 ~]# vim 01-json-template.conf
template(name="json-template" type="list") {
    constant(value="{")
    constant(value="\"@timestamp\":\"")     property(name="timereported" dateFormat="rfc3339")
    constant(value="\",\"@version\":\"1")
    constant(value="\",\"message\":\"")     property(name="msg" format="json")
    constant(value="\",\"sysloghost\":\"")  property(name="hostname")
    constant(value="\",\"severity\":\"")    property(name="syslogseverity-text")
    constant(value="\",\"facility\":\"")    property(name="syslogfacility-text")
    constant(value="\",\"programname\":\"") property(name="programname")
    constant(value="\",\"procid\":\"")      property(name="procid")
    constant(value="\"}\n")
}
[root@node01 ~]# systemctl restart rsyslog
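
Generate a test message; it should flow through rsyslog to Logstash and on to Elasticsearch, which the query on the next line confirms:

[root@node01 ~]# logger -t elk-test "hello from rsyslog"             // writes a tagged entry to the local syslog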
[root@node01 ~]# curl -XGET 'http://localhost:9200/logstash-*/_search?q=*&pretty'

8. Collect nginx logs with the Filebeat module

Make sure the nginx logs are in the default (non-JSON) format first.

[root@node01 elasticsearch]# sudo bin/elasticsearch-plugin install ingest-geoip
[root@node01 elasticsearch]# sudo bin/elasticsearch-plugin install ingest-user-agent
[root@node01 ~]# systemctl restart elasticsearch
[root@node01 ~]# filebeat modules enable nginx
Enabled nginx
[root@node01 ~]# vim /etc/filebeat/modules.d/nginx.yml
- module: nginx
  access:
    enabled: true
    var.paths: ["/var/log/nginx/*.log"]

  error:
    enabled: true
    var.paths: ["/var/log/nginx/error.log"]
[root@node01 ~]# vim /etc/filebeat/filebeat.yml
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: true 
  reload.period: 10s

output.elasticsearch:
  hosts: ["192.168.1.10:9200"]
  indices:
  - index: "nginx-www-%{[beat.version]}-%{+yyyy.MM}"
    when.contains:
      source: "/var/log/nginx/www.log"
  - index: "nginx-blog-%{[beat.version]}-%{+yyyy.MM}"
    when.contains:
      source: "/var/log/nginx/blog.log"
  - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
    when.contains:
      source: "/var/log/nginx/error.log"

setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
[root@node01 ~]# systemctl restart filebeat
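
To double-check which modules are active:

[root@node01 ~]# filebeat modules list             // "nginx" should be listed under Enabled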

9. Collect MySQL slow logs with the Filebeat module

[root@mysql-1 ~]# vim /etc/my.cnf
[mysqld]
user=mysql
basedir=/usr/local/mysql
datadir=/data/mysql/data
socket=/tmp/mysql.sock
server_id=6
log_bin=/data/binlog/mysql-bin
binlog_format=row
port=3306
log_error=/tmp/mysql3306.log
gtid-mode=on
enforce-gtid-consistency=true
#enable the slow query log
slow_query_log=1 
#log file path (create the directory and set ownership beforehand)
slow_query_log_file=/data/mysql/slow.log
#slow query threshold, in seconds
long_query_time=0.1
#also log queries that do not use an index
log_queries_not_using_indexes
[mysql]
socket=/tmp/mysql.sock
[root@mysql-1 ~]# systemctl restart mysqld             // the unit may be named mysql on some installs
[root@node01 ~]# filebeat modules enable mysql
[root@node01 ~]# vim /etc/filebeat/modules.d/mysql.yml
- module: mysql
  error:
    enabled: true
    var.paths: ["/tmp/mysql3306.log"]             // matches log_error in my.cnf

  slowlog:
    enabled: true 
    var.paths: ["/data/mysql/slow.log"]
[root@node01 ~]# vim /etc/filebeat/filebeat.yml
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: true
  reload.period: 10s

output.elasticsearch:
  hosts: ["192.168.1.10:9200"]
  indices:
    - index: "mysql_slowlog-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        fileset.module: "mysql"
        fileset.name: "slowlog"
    - index: "mysql_error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        fileset.module: "mysql"
        fileset.name: "error"

setup.template.name: "mysql"
setup.template.pattern: "mysql_*"
setup.template.enabled: false
setup.template.overwrite: true
[root@node01 ~]# systemctl restart filebeat
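
With long_query_time=0.1, a deliberately slow statement is an easy end-to-end test (run on the MySQL host):

[root@mysql-1 ~]# mysql -e 'SELECT SLEEP(0.5);'             // exceeds the threshold, so it lands in slow.log
[root@mysql-1 ~]# tail -3 /data/mysql/slow.log             // the new entry should appear here, then in ES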

IV. Building a Filebeat + Redis + Logstash + ES + Kibana Stack (large scale)

1. Install Redis

[root@node-1 ~]# mkdir -p /opt/redis_cluster/redis_6379/{conf,logs,pid}
[root@node-1 ~]# cd /data/soft/
[root@node-1 soft]# wget http://download.redis.io/releases/redis-5.0.7.tar.gz
[root@node-1 soft]# tar xf redis-5.0.7.tar.gz -C /opt/redis_cluster/
[root@node-1 soft]# ln -s /opt/redis_cluster/redis-5.0.7  /opt/redis_cluster/redis
[root@node-1 soft]# cd /opt/redis_cluster/redis
[root@node-1 redis]# make && make install 
[root@node-1 redis]# vim /opt/redis_cluster/redis_6379/conf/6379.conf
bind 127.0.0.1 192.168.1.10
port 6379
daemonize yes
pidfile /opt/redis_cluster/redis_6379/pid/redis_6379.pid
logfile /opt/redis_cluster/redis_6379/logs/redis_6379.log
databases 16
dbfilename redis.rdb
dir /opt/redis_cluster/redis_6379
[root@node-1 redis]# redis-server /opt/redis_cluster/redis_6379/conf/6379.conf
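
A quick check that the server is up:

[root@node-1 redis]# redis-cli ping             // PONG means Redis is ready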

2. Reconfigure Filebeat to output to Redis

[root@node-1 ~]# vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]

- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["error"]

setup.template.settings:
  index.number_of_shards: 3

setup.kibana:

output.redis:
  hosts: ["192.168.1.10"]
  key: "filebeat"
  db: 0
  timeout: 5
[root@node-1 ~]# systemctl restart filebeat
[root@node-1 ~]# ab -n 100 -c 20 http://192.168.1.10/             // generate traffic with the ab benchmarking tool
[root@node-1 ~]# redis-cli              // connect to Redis
127.0.0.1:6379> keys *             // list all keys
1) "filebeat"
127.0.0.1:6379> type filebeat              // "filebeat" is the key configured in filebeat.yml
list
127.0.0.1:6379> LLEN filebeat             // length of the list
(integer) 100
127.0.0.1:6379> LRANGE filebeat 0 -1             // dump the whole list
  1) "{\"@timestamp\":\"2020-08-05T13:53:57.104Z\",\"@metadata\":{\"beat\":\"filebeat\",\"type\":\"doc\",\"version\":\"6.6.0\"},\"up_resp_time\":\"-\",\"source\":\"/var/log/nginx/access.log\",\"error\":{\"message\":\"@timestamp not overwritten (parse error on 05/Aug/2020:21:53:49 +0800)\",\"type\":\"json\"},\"input\":{\"type\":\"log\"},\"host\":{\"name\":\"node-1\"},\"agent\":\"ApacheBench/2.3\",\"up_host\":\"-\",\"remote_addr\":\"192.168.1.10\",\"bytes\":4833,\"prospector\":{\"type\":\"log\"},\"log\":{\"file\":{\"path\":\"/var/log/nginx/access.log\"}},\"request\":\"GET / HTTP/1.0\",\"request_time\":\"0.000\",\"tags\":[\"access\"],\"beat\":{\"version\":\"6.6.0\",\"name\":\"node-1\",\"hostname\":\"node-1\"},\"up_addr\":\"-\",\"x_forwarded\":\"-\",\"offset\":82800,\"referer\":\"-\",\"status\":200}"
......

3. Install Logstash to pull logs from Redis and ship them to ES

[root@node-1 ~]# cd /data/soft/
[root@node-1 soft]# rpm -ivh logstash-6.6.0.rpm 
[root@node-1 ~]# vim /etc/logstash/conf.d/redis.conf             // splits access and error logs into separate indices
input {
  redis {
    host => "192.168.1.10"
    port => "6379"
    db => "0"
    key => "filebeat"
    data_type => "list"
  }
}

filter {
  mutate {
    convert => ["upstream_time","float"]
    convert => ["request_time","float"]
  }
}

output {
  stdout {}
  if "access" in [tags] {
    elasticsearch {
      hosts => ["http://192.168.1.10:9200"]
      index => "nginx_access-%{+YYYY.MM.dd}"
      manage_template => false
    }
  }
  if "error" in [tags] {
    elasticsearch {
      hosts => ["http://192.168.1.10:9200"]
      index => "nginx_error-%{+YYYY.MM.dd}"
      manage_template => false
    }
  }
}
[root@node-1 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis.conf             // run Logstash in the foreground
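
Running in the foreground is convenient for debugging because the stdout output echoes every event; for normal operation let systemd manage the service instead:

[root@node-1 ~]# systemctl start logstash             // loads every file under /etc/logstash/conf.d/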

4. Load-balance Redis with nginx + keepalived

[root@node01 ~]# yum -y install nginx
[root@node01 ~]# systemctl start nginx
[root@node01 ~]# vim /etc/nginx/nginx.conf             // the stream { } block sits at the top level, outside http { }
stream {
  upstream redis {
     server 192.168.1.20:6379 max_fails=2 fail_timeout=10s;
     server 192.168.1.30:6379 max_fails=2 fail_timeout=10s;
  }

  server {
        listen 6379;
        proxy_connect_timeout 1s;
        proxy_timeout 3s;
        proxy_pass redis;
  }
}
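
Validate and reload nginx, then make sure Redis answers through the proxy (on EPEL builds the stream module ships separately as nginx-mod-stream, so install that package if nginx -t rejects the stream block):

[root@node01 ~]# nginx -t && systemctl reload nginx
[root@node01 ~]# redis-cli -h 192.168.1.10 -p 6379 ping             // PONG means the stream proxy is forwarding
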
[root@node01 ~]# yum -y install keepalived
[root@node01 ~]# vim /etc/keepalived/keepalived.conf 
global_defs {
   router_id lb1
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.254
    }
}
[root@node01 ~]# systemctl restart keepalived
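
Confirm the VIP is bound on the MASTER; Filebeat clients can then point output.redis at 192.168.1.254 instead of an individual node:

[root@node01 ~]# ip addr show ens33 | grep 192.168.1.254             // the VIP should appear on the MASTER only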

V. Using Kibana

Browse to http://IP:5601 to use Kibana.

Reposted from blog.csdn.net/g950904/article/details/104554319