[关闭]
@zhangsiming65965 2019-03-18T08:01:49.000000Z 字数 20194 阅读 175

ELK日志分析平台(分离式部署ELKStack)

消息队列与数据库缓存

---Author:张思明 ZhangSiming

---Mail:1151004164@cnu.edu.cn

---QQ:1030728296

如果对技术文档中的内容有任何疑问,欢迎加微信:zhangsiming422或者QQ:1030728296一起讨论学习!


一、ELK Stack日志收集平台概述

1.1需求背景

一般传统的分析日志,直接在日志文件中进行sed、grep、awk分析即可,但是这种方式在大量日志数据的环境下比较繁杂,费时费力,难以得到高效率的数据及直观的呈现形式,于是就有了构建一套集中式日志系统,统一收集日志分析,可以提高日志分析效率。

1.2Filebeat+ELK主流日志收集流程

image_1d5bfevsqh6jngl11323jc1nmj9.png-46kB

日志就是数据,数据就可以产生价值。由于公司对日志收集的需求,引进Filebeat+ELK Stack平台进行日志收集分析。

1.Filebeat从生产环境收集到日志数据(Log File);
2.Filebeat把数据传输到Logstash进行日志过滤筛查;其中包含三部分:input、filter、output,含有丰富的过滤插件,可以有效地过滤优化日志数据;
3.Logstash把优化后的日志数据写入Elasticsearch数据库存储;
4.Kibana从Elasticsearch数据库拿取日志数据在Web界面显示给相关人员查看分析。

二、部署ELK Stack日志平台

2.1部署Kibana

Kibana用于展现数据,它具有Web界面,但它本身不存储数据,只是从数据库中拿取。

  1. [root@ZhangSiming ~]# ls
  2. anaconda-ks.cfg common_install.sh kibana-6.2.3-linux-x86_64.tar.gz
  3. [root@ZhangSiming ~]# useradd -s /sbin/nologin -M elk
  4. #创建elk程序用户
  5. [root@ZhangSiming ~]# tar xf kibana-6.2.3-linux-x86_64.tar.gz -C /usr/local/
  6. [root@ZhangSiming ~]# mv /usr/local/kibana-6.2.3-linux-x86_64/ /usr/local/kibana
  7. [root@ZhangSiming ~]# cd /usr/local/kibana/
  7. [root@ZhangSiming kibana]# vim config/kibana.yml
  8. [root@ZhangSiming kibana]# sed -n '2p;7p' config/kibana.yml
  9. server.port: 5601
  10. server.host: "0.0.0.0"
  11. [root@ZhangSiming kibana]# chown -R elk.elk /usr/local/kibana/
  12. [root@ZhangSiming kibana]# vim /usr/local/kibana/bin/start.sh
  13. [root@ZhangSiming kibana]# cat /usr/local/kibana/bin/start.sh
  14. nohup /usr/local/kibana/bin/kibana >> /tmp/kibana.log 2>> /tmp/kibana.log &
  15. #nohup ... &表示防止session终止或者ctrl+c类型前台终止服务
  16. [root@ZhangSiming kibana]# chmod a+x /usr/local/kibana/bin/start.sh
  17. [root@ZhangSiming kibana]# su -s /bin/bash elk '/usr/local/kibana/bin/start.sh'
  18. [root@ZhangSiming kibana]# ps -elf | grep elk | grep -v grep
  19. 0 S elk 11918 1 27 80 0 - 316236 ep_pol 16:54 pts/0 00:00:03 /usr/local/kibana/bin/../node/bin/node --no-warnings /usr/local/kibana/bin/../src/cli
  20. [root@ZhangSiming kibana]# netstat -antup | grep 5601
  21. tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 11918/node
  22. #启动成功,监听5601端口
  23. [root@ZhangSiming kibana]# tail /tmp/kibana.log
  24. {"type":"log","@timestamp":"2019-03-07T08:55:30Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"Unable to revive connection: http://localhost:9200/"}
  25. {"type":"log","@timestamp":"2019-03-07T08:55:30Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"No living connections"}
  26. {"type":"log","@timestamp":"2019-03-07T08:55:32Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"Unable to revive connection: http://localhost:9200/"}
  27. {"type":"log","@timestamp":"2019-03-07T08:55:32Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"No living connections"}
  28. {"type":"log","@timestamp":"2019-03-07T08:55:35Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"Unable to revive connection: http://localhost:9200/"}
  29. {"type":"log","@timestamp":"2019-03-07T08:55:35Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"No living connections"}
  30. {"type":"log","@timestamp":"2019-03-07T08:55:37Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"Unable to revive connection: http://localhost:9200/"}
  31. {"type":"log","@timestamp":"2019-03-07T08:55:37Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"No living connections"}
  32. {"type":"log","@timestamp":"2019-03-07T08:55:40Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"Unable to revive connection: http://localhost:9200/"}
  33. {"type":"log","@timestamp":"2019-03-07T08:55:40Z","tags":["warning","elasticsearch","admin"],"pid":11918,"message":"No living connections"}
  34. #没有连接到Elasticsearch,因为还没有配置

image_1d5bm3nu81asd15qo13bg13u2flj1p.png-96kB

  1. #源码安装Nginx
  2. [root@ZhangSiming ~]# ls
  3. anaconda-ks.cfg kibana-6.2.3-linux-x86_64.tar.gz nginx_install.sh
  4. common_install.sh nginx-1.10.2.tar.gz
  5. [root@ZhangSiming ~]# cat nginx_install.sh
  6. #!/bin/bash
  7. yum -y install pcre-devel openssl-devel
  8. tar xf nginx-1.10.2.tar.gz -C /usr/src/
  9. cd /usr/src/nginx-1.10.2/
  10. useradd -s /sbin/nologin -M nginx
  11. ./configure --user=nginx --group=nginx --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module
  12. make && make install
  13. ln -s /usr/local/nginx/sbin/* /usr/local/sbin/
  14. [root@ZhangSiming ~]# sh nginx_install.sh
  15. [root@ZhangSiming ~]# nginx -V
  16. nginx version: nginx/1.10.2
  17. built by gcc 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC)
  18. built with OpenSSL 1.0.2k-fips 26 Jan 2017
  19. TLS SNI support enabled
  20. configure arguments: --user=nginx --group=nginx --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module
  21. [root@ZhangSiming ~]# cd /usr/local/nginx/
  22. [root@ZhangSiming nginx]# vim conf/nginx.conf
  23. [root@ZhangSiming nginx]# cat conf/nginx.conf
  24. worker_processes 1;
  25. events {
  26. worker_connections 1024;
  27. }
  28. http {
  29. include mime.types;
  30. default_type application/octet-stream;
  31. sendfile on;
  32. keepalive_timeout 65;
  33. log_format main '$remote_addr - $remote_user [$time_local] "$request"'
  34. '$status $body_bytes_sent "$http_referer"'
  35. '"$http_user_agent""$http_x_forwarded_for"';
  36. server {
  37. listen 5609;
  38. server_name www.kibana.com;
  39. access_log /usr/local/nginx/logs/kibana_access.log main;
  40. error_log /usr/local/nginx/logs/kibana_error.log error;
  41. location / {
  42. auth_basic "elk auth";
  43. auth_basic_user_file /usr/local/nginx/conf/htpasswd;
  44. proxy_pass http://127.0.0.1:5601;
  45. #Nginx也可以用allow、deny做IP访问限制
  46. }
  47. }
  48. }
  49. #使用openssl生成密码文件
  50. [root@ZhangSiming nginx]# openssl passwd -crypt
  51. Password:
  52. Verifying - Password:
  53. WFaQjx/45ljv.
  54. [root@ZhangSiming nginx]# vim /usr/local/nginx/conf/htpasswd
  55. [root@ZhangSiming nginx]# cat /usr/local/nginx/conf/htpasswd
  56. zhangsiming:WFaQjx/45ljv.
  57. #账号和加密的密码
  58. #Kibana限制只能本地访问,重启Kibana
  59. [root@ZhangSiming kibana]# vim config/kibana.yml
  60. [root@ZhangSiming kibana]# sed -n '7p' config/kibana.yml
  61. server.host: "127.0.0.1"
  62. [root@ZhangSiming kibana]# ps -elf | grep kibana
  63. 0 S elk 11918 1 0 80 0 - 321323 ep_pol 16:54 pts/0 00:00:06 /usr/local/kibana/bin/../node/bin/node --no-warnings /usr/local/kibana/bin/../src/cli
  64. 0 R root 14502 1180 0 80 0 - 28176 - 17:16 pts/0 00:00:00 grep --color=auto kibana
  65. [root@ZhangSiming kibana]# kill -9 11918
  66. [root@ZhangSiming kibana]# su -s /bin/bash elk '/usr/local/kibana/bin/start.sh'
  67. [root@ZhangSiming nginx]# nginx
  68. #启动Nginx

image_1d5bneatsnqkae81fh9fc9oct26.png-24.5kB

image_1d5bnes4339b118u1c7g4hdjg42j.png-86.1kB

2.2部署Elasticsearch

Elasticsearch是用来存储日志数据的,需要jdk环境才能运行。

  1. #部署Jdk环境
  2. [root@ZhangSiming ~]# ls
  3. anaconda-ks.cfg elasticsearch-6.2.3.tar.gz
  4. common_install.sh jdk-8u60-linux-x64.tar.gz
  5. [root@ZhangSiming ~]# tar xf jdk-8u60-linux-x64.tar.gz -C /usr/local/
  6. [root@ZhangSiming ~]# mv /usr/local/jdk1.8.0_60/ /usr/local/jdk
  7. [root@ZhangSiming ~]# vim /etc/profile
  8. [root@ZhangSiming ~]# tail -3 /etc/profile
  9. export JAVA_HOME=/usr/local/jdk/
  10. export PATH=$PATH:$JAVA_HOME/bin
  11. export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar:$CLASSPATH
  12. [root@ZhangSiming ~]# . /etc/profile
  13. [root@ZhangSiming ~]# java -version
  14. java version "1.8.0_60"
  15. Java(TM) SE Runtime Environment (build 1.8.0_60-b27)
  16. Java HotSpot(TM) 64-Bit Server VM (build 25.60-b23, mixed mode)
  17. #安装Elasticsearch
  18. [root@ZhangSiming ~]# tar xf elasticsearch-6.2.3.tar.gz -C /usr/local
  19. [root@ZhangSiming ~]# mv /usr/local/elasticsearch-6.2.3/ /usr/local/elasticsearch
  20. [root@ZhangSiming ~]# cd /usr/local/elasticsearch
  21. [root@ZhangSiming elasticsearch]# ls
  22. bin lib logs NOTICE.txt README.textile
  23. config LICENSE.txt modules plugins
  24. [root@ZhangSiming elasticsearch]# vim config/elasticsearch.yml
  25. [root@ZhangSiming elasticsearch]# sed -n '33p;37p;55p;59p' config/elasticsearch.yml
  26. path.data: /usr/local/elasticsearch/data
  27. path.logs: /usr/local/elasticsearch/logs
  28. network.host: 0.0.0.0
  29. http.port: 9200
  30. #Kibana要连接这个地址才能获取Elasticsearch的数据
  31. [root@ZhangSiming elasticsearch]# useradd -s /sbin/nologin elk
  32. [root@ZhangSiming elasticsearch]# chown -R elk.elk /usr/local/elasticsearch/
  33. [root@ZhangSiming elasticsearch]# vim config/jvm.options
  34. [root@ZhangSiming elasticsearch]# sed -n '22,23p' config/jvm.options
  35. -Xms100M
  36. -Xmx100M
  37. #为了防止虚拟机内存跑满,限制JVM堆内存为100M大小,取消伸缩区
  38. [root@ZhangSiming elasticsearch]# vim bin/start.sh
  39. [root@ZhangSiming elasticsearch]# cat bin/start.sh
  40. /usr/local/elasticsearch/bin/elasticsearch -d >> /tmp/elasticsearch.log 2>> /tmp/elasticsearch.log
  41. #启动脚本,daemon的方式运行Elasticsearch
  42. [root@ZhangSiming elasticsearch]# chmod a+x bin/start.sh
  43. [root@ZhangSiming elasticsearch]# su -s /bin/bash elk '/usr/local/elasticsearch/bin/start.sh'
  44. [root@ZhangSiming elasticsearch]# ps -elf | grep elk | grep -v grep
  45. 0 S elk 1408 1 35 80 0 - 537021 futex_ 17:43 pts/0 00:00:03 /usr/local/jdk//bin/java -Xms100M -Xmx100M -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.io.tmpdir=/tmp/elasticsearch.T7KaOQi4 -XX:+HeapDumpOnOutOfMemoryError -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime -Xloggc:logs/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=32 -XX:GCLogFileSize=64m -Des.path.home=/usr/local/elasticsearch -Des.path.conf=/usr/local/elasticsearch/config -cp /usr/local/elasticsearch/lib/* org.elasticsearch.bootstrap.Elasticsearch -d
  46. #切换到Kibana服务器,修改配置文件
  47. [root@ZhangSiming kibana]# vim config/kibana.yml
  48. [root@ZhangSiming kibana]# sed -n '21p' config/kibana.yml
  49. elasticsearch.url: "http://192.168.17.139:9200"
  50. #正确连接Elasticsearch
  51. [root@ZhangSiming kibana]# ps -elf | grep kibana | grep -v grep
  52. 0 S elk 14524 1 0 80 0 - 321729 ep_pol 17:16 pts/0 00:00:07 /usr/local/kibana/bin/../node/bin/node --no-warnings /usr/local/kibana/bin/../src/cli
  53. [root@ZhangSiming kibana]# kill -9 14524
  54. [root@ZhangSiming kibana]# /usr/local/kibana/bin/start.sh
  1. [root@ZhangSiming elasticsearch]# tail -10 logs/elasticsearch.log
  2. [2019-03-07T17:50:55,927][INFO ][o.e.b.BootstrapChecks ] [L_zSuq3] bound or publishing to a non-loopback address, enforcing bootstrap checks
  3. [2019-03-07T17:50:55,945][ERROR][o.e.b.Bootstrap ] [L_zSuq3] node validation exception
  4. [3] bootstrap checks failed
  5. [1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
  6. [5]: max number of threads [3802] for user [elk] is too low, increase to at least [4096]
  7. [6]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
  8. [2019-03-07T17:50:56,186][INFO ][o.e.n.Node ] [L_zSuq3] stopping ...
  9. [2019-03-07T17:50:56,209][INFO ][o.e.n.Node ] [L_zSuq3] stopped
  10. [2019-03-07T17:50:56,209][INFO ][o.e.n.Node ] [L_zSuq3] closing ...
  11. [2019-03-07T17:50:56,237][INFO ][o.e.n.Node ] [L_zSuq3] closed
  12. #我们可以看到Elasticsearch是被强制关闭了的,对于非监听127.0.0.1的Elasticsearch,需要根据日志提示修改一些系统资源限制(limits.conf)和内核参数(vm.max_map_count)
  13. [root@ZhangSiming elasticsearch]# vim /etc/security/limits.conf
  14. [root@ZhangSiming elasticsearch]# sed -n '61,64p' /etc/security/limits.conf
  15. * soft nofile 65536
  16. * hard nofile 65536
  17. elk soft nproc 4096
  18. elk hard nproc 4096
  19. elk soft memlock unlimited
  20. elk hard memlock unlimited
  21. [root@ZhangSiming elasticsearch]# vim /etc/sysctl.conf
  22. [root@ZhangSiming elasticsearch]# sed -n '12p' /etc/sysctl.conf
  23. vm.max_map_count=262144
  24. [root@ZhangSiming elasticsearch]# sysctl -p
  25. vm.max_map_count = 262144
  26. [root@ZhangSiming elasticsearch]# vim config/elasticsearch.yml
  27. [root@ZhangSiming elasticsearch]# sed -n '43,44p' config/elasticsearch.yml
  28. bootstrap.memory_lock: true
  29. bootstrap.system_call_filter: false
  30. [root@ZhangSiming elasticsearch]# exit
  31. #重新启动Elasticsearch
  32. [root@ZhangSiming ~]# su -s /bin/bash elk '/usr/local/elasticsearch/bin/start.sh'
  33. [root@ZhangSiming ~]# ps -elf | grep elasticsearch | grep -v grep
  34. 0 S elk 2395 1 14 80 0 - 567583 futex_ 18:34 pts/0 00:00:15 /usr/local/jdk//bin/java -Xms100M -Xmx100M -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.io.tmpdir=/tmp/elasticsearch.yF7qH5Hg -XX:+HeapDumpOnOutOfMemoryError -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime -Xloggc:logs/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=32 -XX:GCLogFileSize=64m -Des.path.home=/usr/local/elasticsearch -Des.path.conf=/usr/local/elasticsearch/config -cp /usr/local/elasticsearch/lib/* org.elasticsearch.bootstrap.Elasticsearch -d
  35. [root@ZhangSiming ~]# netstat -antup | grep 9200
  36. tcp6 0 0 :::9200 :::* LISTEN 2395/java
  37. tcp6 0 0 192.168.17.139:9200 192.168.17.143:38452 ESTABLISHED 2395/java
  38. tcp6 0 0 192.168.17.139:9200 192.168.17.143:38454 ESTABLISHED 2395/java
  39. #成功启动

image_1d5brmd0cb1611rdkb81livst530.png-80.5kB

2.3部署Logstash

Logstash是用来读取日志,正则分析日志,发送给Elasticsearch的,拥有非常多的过滤插件,很强大。

  1. #Logstash需要Jdk环境
  2. [root@ZhangSiming ~]# tar xf jdk-8u60-linux-x64.tar.gz -C /usr/local/
  3. [root@ZhangSiming ~]# mv /usr/local/jdk1.8.0_60/ /usr/local/jdk
  4. [root@ZhangSiming ~]# vim /etc/profile
  5. [root@ZhangSiming ~]# tail -3 /etc/profile
  6. export JAVA_HOME=/usr/local/jdk/
  7. export PATH=$PATH:$JAVA_HOME/bin
  8. export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar:$CLASSPATH
  9. [root@ZhangSiming ~]# . /etc/profile
  10. [root@ZhangSiming ~]# java -version
  11. java version "1.8.0_60"
  12. Java(TM) SE Runtime Environment (build 1.8.0_60-b27)
  13. Java HotSpot(TM) 64-Bit Server VM (build 25.60-b23, mixed mode)
  14. #安装Logstash
  15. [root@ZhangSiming ~]# tar xf logstash-6.2.3.tar.gz -C /usr/local/
  16. [root@ZhangSiming ~]# mv /usr/local/logstash-6.2.3/ /usr/local/logstash
  17. [root@ZhangSiming ~]# cd /usr/local/logstash
  18. [root@ZhangSiming logstash]# vim config/jvm.options
  19. [root@ZhangSiming logstash]# sed -n '6,7p' config/jvm.options
  20. -Xms150M
  21. -Xmx150M
  22. #修改JVM堆内存150M大小,取消伸缩区
  23. #Logstash默认没有,需要自己创建
  24. [root@ZhangSiming ~]# vim /usr/local/logstash/config/logstash.conf
  25. [root@ZhangSiming ~]# cat /usr/local/logstash/config/logstash.conf
  26. input {
  27. file {
  28. path => "/usr/local/nginx/logs/access.log"
  29. }
  30. }
  31. output {
  32. elasticsearch {
  33. hosts => ["http://192.168.17.139:9200"]
  34. }
  35. }
  36. [root@ZhangSiming ~]# useradd -s /sbin/nologin -M elk
  37. [root@ZhangSiming ~]# vim /usr/local/logstash/bin/start.sh
  38. [root@ZhangSiming ~]# cat /usr/local/logstash/bin/start.sh
  39. #!/bin/bash
  40. nohup /usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash.conf >> /tmp/logstash.log 2>>/tmp/logstash.log &
  41. #logstash并没有监听端口,因此不需要用elk用户来启动
  42. [root@ZhangSiming ~]# chmod a+x /usr/local/logstash/bin/start.sh
  43. [root@ZhangSiming ~]# /usr/local/logstash/bin/start.sh
  44. [root@ZhangSiming ~]# ps -ef | grep logstash
  45. root 3935 1 5 19:08 pts/0 00:00:00 /usr/local/jdk//bin/java -Xms150M -Xmx150M -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djruby.compile.invokedynamic=true -Djruby.jit.threshold=0 -XX:+HeapDumpOnOutOfMemoryError -Djava.security.egd=file:/dev/urandom -cp /usr/local/logstash/logstash-core/lib/jars/animal-sniffer-annotations-1.14.jar:/usr/local/logstash/logstash-core/lib/jars/commons-compiler-3.0.8.jar:/usr/local/logstash/logstash-core/lib/jars/error_prone_annotations-2.0.18.jar:/usr/local/logstash/logstash-core/lib/jars/google-java-format-1.5.jar:/usr/local/logstash/logstash-core/lib/jars/guava-22.0.jar:/usr/local/logstash/logstash-core/lib/jars/j2objc-annotations-1.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-annotations-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-core-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-databind-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-dataformat-cbor-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/janino-3.0.8.jar:/usr/local/logstash/logstash-core/lib/jars/javac-shaded-9-dev-r4023-3.jar:/usr/local/logstash/logstash-core/lib/jars/jruby-complete-9.1.13.0.jar:/usr/local/logstash/logstash-core/lib/jars/jsr305-1.3.9.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-api-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-core-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-slf4j-impl-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/logstash-core.jar:/usr/local/logstas/logstash-core/lib/jars/slf4j-api-1.7.25.jar org.logstash.Logstash -f /usr/local/logstash/config/logstash.conf
  46. root 3960 1191 0 19:08 pts/0 00:00:00 grep --color=auto logstash

image_1d5bttmfm1g491ee81vqlrgg1s8v3d.png-71.7kB

image_1d5bu049qtf61ep0i1f4dd1uhh3q.png-71.2kB

image_1d5bu1fdd1oscl7jc84fomsqt47.png-74.7kB

image_1d5bu381lf7k6pc1hddhef1jqj4k.png-76.8kB

image_1d5bu7e6trop1l60jru1llbmu551.png-81.5kB

  1. [root@ZhangSiming logstash]# tail /usr/local/nginx/logs/access.log
  2. 127.0.0.1 - - [07/Mar/2019:19:16:04 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  3. 127.0.0.1 - - [07/Mar/2019:19:16:04 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  4. 127.0.0.1 - - [07/Mar/2019:19:16:05 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  5. 127.0.0.1 - - [07/Mar/2019:19:16:05 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  6. 127.0.0.1 - - [07/Mar/2019:19:16:06 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  7. 127.0.0.1 - - [07/Mar/2019:19:16:06 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  8. 127.0.0.1 - - [07/Mar/2019:19:16:06 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  9. 127.0.0.1 - - [07/Mar/2019:19:16:07 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  10. 127.0.0.1 - - [07/Mar/2019:19:16:07 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
  11. 127.0.0.1 - - [07/Mar/2019:19:16:08 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"

image_1d5buafcu7c1sca1ivpguumvf5e.png-84.7kB

Logstash部署成功,至此ELKStack部署完成

三、Logstash使用详解(一)

3.1Logstash工作原理详解

Logstash使用管道方式进行日志的搜集处理和输出。有点类似于管道命令xxx|ccc|ddd,xxx执行完了会执行ccc,然后执行ddd。

image_1d5bum1nd1dic10u4q2v1tohcji5r.png-69.5kB

3.2常用命令

3.3Logstash配置文件

Logstash处理流程经历了三个阶段,配置文件也同样由三个模块组成:input、filter、output。

  1. #指定多个来源数据的配置文件书写方式
  2. input {
  3. file { path => "/var/log/messages" type => "syslog" }
  4. file { path => "/var/log/apache/access.log" type => "apache" }
  5. }

3.4利用logstash的正则进行日志信息的抓取测试

  1. #修改Logstash配置文件
  2. [root@ZhangSiming logstash]# vim config/logstash.conf
  3. [root@ZhangSiming logstash]# cat config/logstash.conf
  4. input {
  5. stdin{} #从标准输入读取数据
  6. }
  7. filter {
  8. grok {
  9. match => {
  10. "message" => '(?<ip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}).*'
  11. }
  12. }
  13. }
  14. output {
  15. elasticsearch { #如果要输入到elasticsearch里,那么需要删除掉stdout{}
  16. hosts => ["http://192.168.17.139:9200"]
  17. }
  18. stdout { #只将信息输出到屏幕上
  19. codec => rubydebug #用于正则提取测试,将正则抓取结果输出到屏幕上
  20. }
  21. }
  22. #重启Logstash
  23. [root@ZhangSiming logstash]# kill -9 3935
  24. [root@ZhangSiming logstash]# /usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash.conf
  25. 192.168.17.1 - zhangsiming [07/Mar/2019:19:24:07 +0800] "POST /elasticsearch/_msearch HTTP/1.1"200 804 "http://192.168.17.143:5609/app/kibana""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36""-"
  26. {
  27. "message" => "192.168.17.1 - zhangsiming [07/Mar/2019:19:24:07 +0800] \"POST /elasticsearch/_msearch HTTP/1.1\"200 804 \"http://192.168.17.143:5609/app/kibana\"\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\"\"-\"",
  28. "host" => "ZhangSiming",
  29. "ip" => "192.168.17.1",
  30. #成功过滤出ip
  31. "@version" => "1",
  32. "@timestamp" => 2019-03-07T13:00:19.666Z
  33. }
  34. #将标准输出写入Elasticsearch
  35. [root@ZhangSiming logstash]# vim config/logstash.conf
  36. [root@ZhangSiming logstash]# tail -4 config/logstash.conf
  37. # stdout {
  38. # codec => rubydebug
  39. # }
  40. }
  41. [root@ZhangSiming logstash]# bin/start.sh

image_1d5c43r9ffh11f561imo1cp32a6p.png-73.1kB

  1. #继续修改Logstash配置文件,定义多个字段
  2. [root@ZhangSiming logstash]# cat config/logstash.conf
  3. input {
  4. file {
  5. path => "/usr/local/nginx/logs/access.log"
  6. }
  7. }
  8. filter {
  9. grok {
  10. match => {
  11. "message" => '(?<ip>[0-9.]+) .*HTTP/1.1" (?<mark>[0-9]+) (?<size>[0-9]+) ".*121 (?<client>[a-zA-Z]+).*'
  12. }
  13. }
  14. }
  15. output {
  16. elasticsearch {
  17. hosts => ["http://192.168.17.139:9200"]
  18. }
  19. }
  20. #重启Logstash
  21. [root@ZhangSiming logstash]# ps -elf | grep logstash
  22. 0 S root 18003 1 25 80 0 - 583828 futex_ 21:11 pts/0 00:01:43 /usr/local/jdk//bin/java -Xms150M -Xmx150M -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djruby.compile.invokedynamic=true -Djruby.jit.threshold=0 -XX:+HeapDumpOnOutOfMemoryError -Djava.security.egd=file:/dev/urandom -cp /usr/local/logstash/logstash-core/lib/jars/animal-sniffer-annotations-1.14.jar:/usr/local/logstash/logstash-core/lib/jars/commons-compiler-3.0.8.jar:/usr/locallogstash/logstash-core/lib/jars/error_prone_annotations-2.0.18.jar:/usr/local/logstash/logstash-core/lib/jars/google-java-format-1.5.jar:/usr/local/logstash/logstash-core/lib/jars/guava-22.0.jar:/usr/local/logstash/logstash-core/lib/jars/j2objc-annotations-1.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-annotations-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-core-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-databind-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-dataformat-cbor-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/janino-3.0.8.jar:/usr/local/logstash/logstash-core/lib/jars/javac-shaded-9-dev-r4023-3.jar:/usr/local/logstash/logstash-core/lib/jars/jruby-complete-9.1.13.0.jar:/usr/local/logstash/logstash-core/lib/jars/jsr305-1.3.9.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-api-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-core-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-slf4j-impl-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/logstash-core.jar:/usr/local/logstash/logstash-core/lib/jars/slf4j-api-1.7.25.jar org.logstash.Logstash -f /usr/local/logstash/config/logstash.conf
  23. 0 R root 18078 1191 0 80 0 - 28176 - 21:18 pts/0 00:00:00 grep --color=auto logstash
  24. [root@ZhangSiming logstash]# kill -9 18003
  25. [root@ZhangSiming logstash]# bin/start.sh

image_1d5iqaj0m1d7a2oh168jgd31s169.png-64.9kB

添加新批注
在作者公开此批注前,只有你和作者可见。
回复批注