Using Logstash in ELK
ELK setup tutorial
I set up ELK a while ago, but a number of requirements came up in day-to-day use. Below is the revised Logstash configuration file, which achieves the following goals:
- Collect logs from Filebeat and filter them (a minimal Filebeat sketch follows this list)
- Many companies keep useful logs in a database; pull those logs out of the database and into ES
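For reference, a minimal Filebeat sketch that could feed this pipeline is shown below. The log paths, tags, and host are assumptions for illustration, not taken from the original setup; only the port (8888) comes from the beats input in the config that follows.

```yaml
# filebeat.yml -- minimal sketch; paths, tags, and host are assumptions
filebeat.inputs:
  - type: log
    paths:
      - /var/log/nginx/access.log        # hypothetical path
    tags: ["nginx"]                      # the Logstash filters below route on these tags
  - type: log
    paths:
      - /data/logs/dc-usercenter/*.log   # hypothetical path
    tags: ["dc-usercenter"]

output.logstash:
  hosts: ["logstash-host:8888"]          # replace with the Logstash server; 8888 matches the beats input
```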
```
input {
  beats {
    port => "8888"
  }
  jdbc {
    # Database connection string
    jdbc_connection_string => "jdbc:mysql://192.168.1.128:3306/test?characterEncoding=UTF-8&useSSL=false&autoReconnect=true&allowLoadLocalInfile=false&autoDeserialize=false"
    # Database user
    jdbc_user => "test"
    # Database password
    jdbc_password => "test#3"
    # JDBC driver jar; put it somewhere this server can read and use the absolute path
    jdbc_driver_library => "/ssd/1/share/ls-cn-n6w1w8r6l00a/logstash/current/config/custom/mysql-connector-java-5.1.9.jar"
    # Driver class name
    jdbc_driver_class => "com.mysql.jdbc.Driver"
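    # Page large result sets: Logstash wraps the statement and fetches 5000 rows per query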
    jdbc_paging_enabled => "true"
    jdbc_page_size => "5000"
    # Character set
    codec => plain {
      charset => "UTF-8"
    }
    # Track a column value instead of the last run time
    use_column_value => "true"
    # Column to track
    tracking_column => "date_created"
    # Type of the tracked column
    tracking_column_type => "timestamp"
    # Persist the last value between runs
    record_last_run => "true"
    # Default timezone for JDBC
    jdbc_default_timezone => "Asia/Shanghai"
    # Path of the file holding the previous sql_last_value; the column's initial value must be set in this file
    # last_run_metadata_path => "G:\Developer\Elasticsearch5.5.1\ES5\logstash-5.5.1\bin\mysql\station_parameter.txt"
    # statement takes the SQL itself; :sql_last_value is replaced with the tracked value at run time
    statement => "select * from stock_operate where date_created > :sql_last_value order by date_created"
    # Alternatively, keep the SQL in a file (absolute path); statement and
    # statement_filepath are mutually exclusive, so only one may be set
    # statement_filepath => "G:\Developer\Elasticsearch5.5.1\ES5\logstash-5.5.1\bin\mysql\jdbc.sql"
    # Keep the saved sql_last_value across restarts
    clean_run => "false"
    # Schedule: a six-field cron expression (with seconds); */30 runs the query every 30 seconds
    schedule => "*/30 * * * * *"
    # Event type, used below to tell this data apart
    type => "stock_operate"
  }
}
# Filtering
filter {
  # Drop any line that does not contain "--" (currently disabled)
  #if "--" not in [message] {
  #  drop {}
  #}
  if "nginx" in [tags] {
    # Logstash can parse JSON-formatted logs
    json {
      source => "message"
      # Remove fields we don't need
      remove_field => ["beat","input","file"]
    }
  }
  # Distinguish services
  if "dc-usercenter" in [tags] {
    # Regex matching; captured fields are added to the event
    grok {
      match => {
        message => "\ %{LOGLEVEL:logLevel}\ "
      }
    }
    grok {
      match => {
        message => "(?<phone>1(3|5|4|8|9)\d{9})"
      }
    }
  }
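  # Hypothetical sample: a line containing " ERROR " and "13812345678" would get
  # logLevel=ERROR and phone=13812345678 added by the two groks above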
if "synchost" in [tags] {
grok {
match => {
message => "-- (?<loglevel>(INF|ERR))--"
}
}
grok {
match => {
message => "\"item_no\":\"(?<item_no>[^\"]*)\".*"
}
}
grok {
match => {
message => "\"business_id\":\"(?<business_id>[^\"]*)\""
}
}
grok {
match => {
message => "(?<order_no>z[0-9]{
11})"
}
}
}
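  # Hypothetical sample: a line like --INF--{"item_no":"A001","business_id":"B123"} ... z12345678901
  # would yield loglevel=INF, item_no=A001, business_id=B123 and order_no=z12345678901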
  # Rows pulled from the database are all JSON, so the json filter can parse them directly
  if [type] == "stock_operate" {
    json {
      source => "message"
      remove_field => ["message"]
    }
  }
  if [type] == "cliniclog_sys_log_total" {
    json {
      source => "message"
      remove_field => ["message"]
    }
  }
}
output {
  if "nginx" in [tags] {
    elasticsearch {
      hosts => "http://192.168.1.120:9200"
      index => "access-nginx-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "$R(test*peX2"
    }
  }
  if "dc-usercenter" in [tags] {
    elasticsearch {
      hosts => "http://192.168.1.120:9200"
      index => "dc-usercenter-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "$R(test*peX2"
    }
  }
  if "synchost" in [tags] {
    elasticsearch {
      hosts => "http://192.168.1.120:9200"
      index => "synchost-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "$R(test*peX2"
    }
  }
  if [type] == "stock_operate" {
    elasticsearch {
      hosts => "http://192.168.1.120:9200"
      index => "cliniclog_stock_operate-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "$R(test*peX2"
    }
  }
  if [type] == "cliniclog_sys_log_total" {
    elasticsearch {
      hosts => "http://192.168.1.120:9200"
      index => "cliniclog_sys_log_total-%{+YYYY.MM.dd}"
      user => "test"
      password => "$R(test*peX2"
    }
    # Also print events to stdout for debugging
    stdout {
      codec => rubydebug
    }
  }
}
```
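Before reloading the pipeline, the file can be validated with Logstash's built-in config check, `bin/logstash -f /path/to/this.conf --config.test_and_exit` (the path is a placeholder for wherever the config lives), and then started with `bin/logstash -f /path/to/this.conf`. With the rubydebug stdout output above, each processed event is also printed to the console, which makes it easy to verify the grok fields before checking the indices in Elasticsearch.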