@chendushuai
2019-11-11T06:32:34.000000Z
字数 34490
阅读 796
redis zk mq
root@ubuntuRedis:~# vim /etc/netplan/50-cloud-init.yamlnetwork:ethernets:ens33:dhcp4: falseaddresses: [192.168.202.130/24]gateway4: 192.168.202.1version: 2root@ubuntuRedis:~# sudo netplan apply
root@ubuntuRedis:~# vim /etc/hostname# 直接修改为对应的主机名称即可
/usr/bin/find /usr/log/applog/* -mtime +3 -ls
find /usr/log/applog/ -mtime +5 -name "tnp" -exec rm -rf {} \;
批量删除Key
bin/redis-cli -c -h 192.168.132.36 -p 6379 keys "area_info_*" | xargs bin/redis-cli -c -h 192.168.132.36 -p 6379 del
解压压缩文件
[redis@DEV-BS-1908-V429 ~]$ tar -zxvf redis-4.0.11.tar.gz
编辑配置文件redis.conf
[redis@DEV-BS-1908-V429 ~]$ vi redis.conf# 需要编辑的内容有bind 192.168.122.36protected-mode nologfile "logs/redis.log"daemonize yes
执行程序编译
[redis@DEV-BS-1908-V429 ~]$ make
安装到当前目录
[redis@DEV-BS-1908-V429 redis-4.0.11]$ make install PREFIX=/home/redis/redis-4.0.11cd src && make installmake[1]: Entering directory `/home/redis/redis-4.0.11/src'CC Makefile.depmake[1]: Leaving directory `/home/redis/redis-4.0.11/src'make[1]: Entering directory `/home/redis/redis-4.0.11/src'Hint: It's a good idea to run 'make test' ;)INSTALL installINSTALL installINSTALL installINSTALL installINSTALL installmake[1]: Leaving directory `/home/redis/redis-4.0.11/src'
新建logs目录
[redis@DEV-BS-1908-V429 redis-4.0.11]$ mkdir logs
[redis@DEV-BS-1908-V429 redis-4.0.11]$ bin/redis-server redis.conf
[redis@DEV-BS-1908-V429 redis-4.0.11]$ ps -ef | grep redisroot 21497 1781 0 11:11 ? 00:00:00 sshd: redis [priv]redis 21499 21497 0 11:11 ? 00:00:00 sshd: redis@pts/0redis 21500 21499 0 11:11 pts/0 00:00:00 -bashredis 24855 21500 0 11:16 pts/0 00:00:00 bin/redis-server 192.168.122.38:6379redis 24911 21500 0 11:20 pts/0 00:00:00 ps -efredis 24912 21500 0 11:20 pts/0 00:00:00 grep redis
[redis@DEV-BS-1908-V429 redis-4.0.11]$ bin/redis-cli -c -h 192.168.165.16 -p 6379
[redis@DEV-BS-1908-V429 redis-4.0.11]$ bin/redis-cli -c -h 192.168.165.16 -p 6379 cluster nodes# 若结果如下,则认为未启用集群ERR This instance has cluster support disabled
修改配置文件
[redis@SZ1-BS-1908-V1161 redis-4.0.11]$ vi sentinel.conf# 修改内容有:protected-mode nodir "/tmp"sentinel monitor mymaster 192.168.122.36 6379 2#注意全部是主IP地址sentinel known-slave tnp 172.21.64.46 6379sentinel known-slave tnp 172.21.64.47 6379sentinel known-sentinel tnp 172.21.64.46 16379 bd11d31008ecbf0b4fb0d042b462ef2299b545fasentinel known-sentinel tnp 172.21.64.47 16379 9bbb1066ee3e64dfd09c80068a12fbc02b5a0052
[redis@SZ1-BS-1908-V1161 redis-4.0.11]$ bin/redis-sentinel sentinel.conf26640:X 27 Aug 15:39:03.926 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo26640:X 27 Aug 15:39:03.926 # Redis version=4.0.11, bits=64, commit=00000000, modified=0, pid=26640, just started26640:X 27 Aug 15:39:03.926 # Configuration loaded
解压文件
[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ tar -zxvf zookeeper-3.4.10.tar.gz
拷贝配置文件
[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ cp conf/zoo_sample.cfg conf/zoo.cfg
编辑配置文件
[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ vi conf/zoo.cfg# 修改内容为# The number of milliseconds of each ticktickTime=2000# The number of ticks that the initial# synchronization phase can takeinitLimit=10# The number of ticks that can pass between# sending a request and getting an acknowledgementsyncLimit=5# the directory where the snapshot is stored.dataDir=/home/zookeeper/zookeeper-3.4.10/data# the directory where the log is stored.dataLogDir=/home/zookeeper/zookeeper-3.4.10/logs# the port at which the clients will connectclientPort=2181maxClientCnxns=200server.1=192.168.122.36:2888:4888server.2=192.168.122.37:2888:4888server.3=192.168.122.38:2888:4888
创建数据目录和日志目录
[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ mkdir data[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ mkdir logs
启动ZK
[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ bin/zkServer.sh startZooKeeper JMX enabled by defaultUsing config: /home/zookeeper/zookeeper-3.4.10/bin/../conf/zoo.cfgUsage: bin/zkServer.sh {start|start-foreground|stop|restart|status|upgrade|print-cmd}
只有一台机器启动时,暂时不是集群
[zookeeper@DEV-BS-1908-V429 zookeeper-3.4.10]$ bin/zkServer.sh statusZooKeeper JMX enabled by defaultUsing config: /home/zookeeper/zookeeper-3.4.10/bin/../conf/zoo.cfgMode: follower
[zookeeper@DEV-BS-1908-V429 zookeeper-3.4.10]$ bin/zkServer.sh statusZooKeeper JMX enabled by defaultUsing config: /home/zookeeper/zookeeper-3.4.10/bin/../conf/zoo.cfgMode: leader
如果结果为
[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ bin/zkServer.sh statusZooKeeper JMX enabled by defaultUsing config: /home/zookeeper/zookeeper-3.4.10/bin/../conf/zoo.cfgError contacting service. It is probably not running.
说明ZK启动失败,处理方案如下
[zookeeper@DEV-BS-1908-V427 zookeeper-3.4.10]$ vi conf/zoo.cfg# 检查后面的4888端口是否打通,建议将端口修改为允许范围内的端口,如20880~20950server.1=192.168.122.36:2888:4888server.2=192.168.122.37:2888:4888server.3=192.168.122.38:2888:4888
[zookeeper@DEV-BS-1908-V429 zookeeper-3.4.10]$ java -versionjava version "1.8.0_45"Java(TM) SE Runtime Environment (build 1.8.0_45-b14)Java HotSpot(TM) 64-Bit Server VM (build 25.45-b02, mixed mode)
此处我们使用的是默认的2181端口,因此查看2181端口是否被占用
[zookeeper@DEV-BS-1908-V429 zookeeper-3.4.10]$ netstat -ano | grep 2181[zookeeper@DEV-BS-1908-V429 zookeeper-3.4.10]$
删除version-2目录和zookeeper_server.pid文件,同时检查myid文件内配置是否正确(ZK多次启动,会修改该文件内的值)
[zookeeper@DEV-BS-1908-V427 data]$ rm -r version-2/[zookeeper@DEV-BS-1908-V427 data]$ rm zookeeper_server.pid
再次重新启动即可正常启动
[zookeeper@SZ1-BS-1908-V1164 zookeeper-3.4.10]$ bin/zkCli.shConnecting to localhost:21812019-08-20 16:35:48,231 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT2019-08-20 16:35:48,235 [myid:] - INFO [main:Environment@100] - Client environment:host.name=SZ1-BS-1908-V1164.lianlianpay-dc.com.............[zk: localhost:2181(CONNECTED) 16] ls /dubbo/com.lianlian.service.lock.LockService/providers[dubbo%3A%2F%2F172.21.64.43%3A20880%2Fcom.lianlian.service.lock.LockService%3Fanyhost%3Dtrue%26application%3Dpay_lock%26default.retries%3D0%26default.timeout%3D10000%26dubbo%3D2.5.6%26generic%3Dfalse%26interface%3Dcom.lianlian.service.lock.LockService%26methods%3Dunlock%2CisLocked%2Clock%26organization%3Dlianpay%26owner%3Dtnp-pay-lock%26pid%3D21504%26revision%3D0.0.1%26side%3Dprovider%26timestamp%3D1566291552023, dubbo%3A%2F%2F172.21.64.44%3A20880%2Fcom.lianlian.service.lock.LockService%3Fanyhost%3Dtrue%26application%3Dpay_lock%26default.retries%3D0%26default.timeout%3D10000%26dubbo%3D2.5.6%26generic%3Dfalse%26interface%3Dcom.lianlian.service.lock.LockService%26methods%3Dunlock%2CisLocked%2Clock%26organization%3Dlianpay%26owner%3Dtnp-pay-lock%26pid%3D20864%26revision%3D0.0.1%26side%3Dprovider%26timestamp%3D1566291672150]
注意 data文件夹必须要拷贝
配置文件中的data文件目录必须正确,在账号不同的情况下,可能会出现目录不同的情况
[rocketmq@SZ1-BS-1908-V1142 ~]$ cd /home/rocketmq/rocketmq-4.2/conf[rocketmq@SZ1-BS-1908-V1142 conf]$
[rocketmq@SZ1-BS-1908-V1142 conf]$ vi broker-a.propertiesnamesrvAddr=172.21.64.27:9876;172.21.64.28:9876brokerIP1=172.21.64.28listenPort=10911brokerIP2=172.21.64.28haListenPort=10912
[rocketmq@SZ1-BS-1908-V1142 conf]$ vi broker-a-s.propertiesnamesrvAddr=172.21.64.27:9876;172.21.64.28:9876brokerIP1=172.21.64.28listenPort=10921haMasterAddress=172.21.64.28:10912
[rocketmq@SZ1-BS-1908-V1143 rocketmq-4.2]$ nohup ./bin/mqnamesrv&
[rocketmq@SZ1-BS-1908-V1143 rocketmq-4.2]$ sh ./bin/mqbroker -c conf/broker-b.properties &[8] 11369[rocketmq@SZ1-BS-1908-V1143 rocketmq-4.2]$ tail -99f ~/logs/rocketmqlogs/broker.log
[rocketMQ@DEV-BS-1908-V428 ~]$ cd /home/rocketMQ/rocketmq-console/target[rocketMQ@DEV-BS-1908-V428 target]$ vi rocketmq-console-ng-1.0.0.jar修改 application.properties# 修改内容为#if this value is empty,use env value rocketmq.config.namesrvAddr NAMESRV_ADDR | now, you can set it in ops page.default localhost:9876rocketmq.config.namesrvAddr=192.168.122.36:9876;192.168.122.37:9876#rocketmq-console's data path:dashboard/monitorrocketmq.config.dataPath=/home/rocketMQ/rocketmq-console-data
[rocketMQ@DEV-BS-1908-V428 target]$ java -jar rocketmq-console-ng-1.0.0.jar &[5] 129601[4] Killed java -jar rocketmq-console-ng-1.0.0.jar[rocketMQ@DEV-BS-1908-V428 target]$ 16:34:23,779 |-INFO in ch.qos.logback.classic.LoggerContext[default] - Could NOT find resource [logback.groovy]16:34:23,780 |-INFO in ch.qos.logback.classic.LoggerContext[default] - Could NOT find resource [logback-test.xml]16:34:23,780 |-INFO in ch.qos.logback.classic.LoggerContext[default] - Found resource [logback.xml] at [jar:file:/home/rocketMQ/rocketmq-console/target/rocketmq-console-ng-1.0.0.jar!/BOOT-INF/classes!/logback.xml]16:34:23,820 |-INFO in ch.qos.logback.core.joran.spi.ConfigurationWatchList@5b6f7412 - URL [jar:file:/home/rocketMQ/rocketmq-console/target/rocketmq-console-ng-1.0.0.jar!/BOOT-INF/classes!/logback.xml] is not of type file16:34:23,880 |-INFO in ch.qos.logback.classic.joran.action.ConfigurationAction - debug attribute not set16:34:23,885 |-INFO in ch.qos.logback.core.joran.action.AppenderAction - About to instantiate appender of type [ch.qos.logback.core.ConsoleAppender]16:34:23,896 |-INFO in ch.qos.logback.core.joran.action.AppenderAction - Naming appender as [STDOUT]16:34:23,905 |-INFO in ch.qos.logback.core.joran.action.NestedComplexPropertyIA - Assuming default type [ch.qos.logback.classic.encoder.PatternLayoutEncoder] for [encoder] property16:34:23,956 |-INFO in ch.qos.logback.core.joran.action.AppenderAction - About to instantiate appender of type [ch.qos.logback.core.rolling.RollingFileAppender]16:34:23,959 |-INFO in ch.qos.logback.core.joran.action.AppenderAction - Naming appender as [FILE]16:34:23,984 |-INFO in c.q.l.core.rolling.TimeBasedRollingPolicy@664223387 - No compression will be used16:34:23,986 |-INFO in c.q.l.core.rolling.TimeBasedRollingPolicy@664223387 - Will use the pattern /home/rocketMQ/logs/consolelogs/rocketmq-console-%d{yyyy-MM-dd}.%i.log for the active file16:34:23,989 |-INFO in ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP@312b1dae - 
The date pattern is 'yyyy-MM-dd' from file name pattern '/home/rocketMQ/logs/consolelogs/rocketmq-console-%d{yyyy-MM-dd}.%i.log'.16:34:23,989 |-INFO in ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP@312b1dae - Roll-over at midnight.16:34:23,993 |-INFO in ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP@312b1dae - Setting initial period to Mon Aug 26 16:31:48 CST 201916:34:23,994 |-WARN in ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP@312b1dae - SizeAndTimeBasedFNATP is deprecated. Use SizeAndTimeBasedRollingPolicy instead16:34:23,996 |-INFO in ch.qos.logback.core.joran.action.NestedComplexPropertyIA - Assuming default type [ch.qos.logback.classic.encoder.PatternLayoutEncoder] for [encoder] property16:34:23,999 |-INFO in ch.qos.logback.core.rolling.RollingFileAppender[FILE] - Active log file name: /home/rocketMQ/logs/consolelogs/rocketmq-console.log16:34:23,999 |-INFO in ch.qos.logback.core.rolling.RollingFileAppender[FILE] - File property is set to [/home/rocketMQ/logs/consolelogs/rocketmq-console.log]16:34:24,000 |-INFO in ch.qos.logback.classic.joran.action.RootLoggerAction - Setting level of ROOT logger to INFO16:34:24,001 |-INFO in ch.qos.logback.core.joran.action.AppenderRefAction - Attaching appender named [STDOUT] to Logger[ROOT]16:34:24,001 |-INFO in ch.qos.logback.core.joran.action.AppenderRefAction - Attaching appender named [FILE] to Logger[ROOT]16:34:24,001 |-INFO in ch.qos.logback.classic.joran.action.ConfigurationAction - End of configuration.16:34:24,002 |-INFO in ch.qos.logback.classic.joran.JoranConfigurator@7530d0a - Registering current configuration as safe fallback point. 
____ _ __ _ _/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \\\/ ___)| |_)| | | | | || (_| | ) ) ) )' |____| .__|_| |_|_| |_\__, | / / / /=========|_|==============|___/=/_/_/_/:: Spring Boot :: (v1.4.3.RELEASE)[2019-08-26 16:34:24.649] INFO Starting App v1.0.0 on DEV-BS-1908-V428 with PID 129601 (/home/rocketMQ/rocketmq-console/target/rocketmq-console-ng-1.0.0.jar started by rocketMQ in /home/rocketMQ/rocketmq-console/target)[2019-08-26 16:34:24.653] INFO No active profile set, falling back to default profiles: default[2019-08-26 16:34:24.758] INFO Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@23ab930d: startup date [Mon Aug 26 16:34:24 CST 2019]; root of context hierarchy[2019-08-26 16:34:24.954] INFO HV000001: Hibernate Validator 5.2.4.Final[2019-08-26 16:34:27.480] INFO Tomcat initialized with port(s): 8080 (http)
[hazelcast@SZ1-BS-1908-V1145 bin]$ cat hazelcast.xml# 修改内容<management-center enabled="false">http://172.21.64.30:8080/mancenter</management-center><tcp-ip enabled="true"><member-list><member>172.21.64.29</member><member>172.21.64.30</member></member-list></tcp-ip>
[hazelcast@SZ1-BS-1908-V1145 hazelcast-3.5.3]$ nohup sh server.sh &[1] 22805[hazelcast@SZ1-BS-1908-V1145 bin]$ nohup: ignoring input and appending output to `nohup.out'[hazelcast@SZ1-BS-1908-V1145 bin]$ tail -199f nohup.outJAVA_HOME found at /usr/java/jdk1.8.0_45Path to Java : /usr/java/jdk1.8.0_45/bin/java######################################### RUN_JAVA=/usr/java/jdk1.8.0_45/bin/java# JAVA_OPTS=# starting now....########################################Aug 20, 2019 11:06:39 AM com.hazelcast.config.XmlConfigLocatorINFO: Loading 'hazelcast-default.xml' from classpath.Aug 20, 2019 11:06:39 AM com.hazelcast.instance.DefaultAddressPickerINFO: [LOCAL] [dev] [3.5.3] Prefer IPv4 stack is true.Aug 20, 2019 11:06:39 AM com.hazelcast.instance.DefaultAddressPickerINFO: [LOCAL] [dev] [3.5.3] Picked Address[172.21.64.30]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is trueAug 20, 2019 11:06:39 AM com.hazelcast.spi.OperationServiceINFO: [172.21.64.30]:5701 [dev] [3.5.3] Backpressure is disabledAug 20, 2019 11:06:39 AM com.hazelcast.spi.impl.operationexecutor.classic.ClassicOperationExecutorINFO: [172.21.64.30]:5701 [dev] [3.5.3] Starting with 2 generic operation threads and 4 partition operation threads.Aug 20, 2019 11:06:40 AM com.hazelcast.systemINFO: [172.21.64.30]:5701 [dev] [3.5.3] Hazelcast 3.5.3 (20151011 - 64c663a) starting at Address[172.21.64.30]:5701Aug 20, 2019 11:06:40 AM com.hazelcast.systemINFO: [172.21.64.30]:5701 [dev] [3.5.3] Copyright (c) 2008-2015, Hazelcast, Inc. 
All Rights Reserved.Aug 20, 2019 11:06:40 AM com.hazelcast.instance.NodeINFO: [172.21.64.30]:5701 [dev] [3.5.3] Creating MulticastJoinerAug 20, 2019 11:06:40 AM com.hazelcast.core.LifecycleServiceINFO: [172.21.64.30]:5701 [dev] [3.5.3] Address[172.21.64.30]:5701 is STARTINGAug 20, 2019 11:06:45 AM com.hazelcast.cluster.impl.MulticastJoinerINFO: [172.21.64.30]:5701 [dev] [3.5.3]Members [1] {Member [172.21.64.30]:5701 this}Aug 20, 2019 11:06:45 AM com.hazelcast.core.LifecycleServiceINFO: [172.21.64.30]:5701 [dev] [3.5.3] Address[172.21.64.30]:5701 is STARTED
[hazelcast@SZ1-BS-1908-V1145 bin]$ ./server.shJAVA_HOME found at /usr/java/jdk1.8.0_45Path to Java : /usr/java/jdk1.8.0_45/bin/java######################################### RUN_JAVA=/usr/java/jdk1.8.0_45/bin/java# JAVA_OPTS=# starting now....########################################Aug 20, 2019 4:58:31 PM com.hazelcast.config.XmlConfigLocatorINFO: Loading 'hazelcast.xml' from working directory.Aug 20, 2019 4:58:31 PM com.hazelcast.instance.DefaultAddressPickerINFO: [LOCAL] [tnp] [3.5.3] Interfaces is disabled, trying to pick one address from TCP-IP config addresses: [172.21.64.30, 172.21.64.29]Aug 20, 2019 4:58:31 PM com.hazelcast.instance.DefaultAddressPickerINFO: [LOCAL] [tnp] [3.5.3] Prefer IPv4 stack is true.Aug 20, 2019 4:58:31 PM com.hazelcast.instance.DefaultAddressPickerINFO: [LOCAL] [tnp] [3.5.3] Picked Address[172.21.64.30]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is trueAug 20, 2019 4:58:31 PM com.hazelcast.spi.OperationServiceINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Backpressure is disabledAug 20, 2019 4:58:31 PM com.hazelcast.spi.impl.operationexecutor.classic.ClassicOperationExecutorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Starting with 2 generic operation threads and 4 partition operation threads.Aug 20, 2019 4:58:31 PM com.hazelcast.systemINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Hazelcast 3.5.3 (20151011 - 64c663a) starting at Address[172.21.64.30]:5701Aug 20, 2019 4:58:31 PM com.hazelcast.systemINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Copyright (c) 2008-2015, Hazelcast, Inc. 
All Rights Reserved.Aug 20, 2019 4:58:31 PM com.hazelcast.instance.NodeINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Creating TcpIpJoinerAug 20, 2019 4:58:31 PM com.hazelcast.core.LifecycleServiceINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Address[172.21.64.30]:5701 is STARTINGAug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Connecting to /172.21.64.29:5703, timeout: 0, bind-any: trueAug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Connecting to /172.21.64.29:5701, timeout: 0, bind-any: trueAug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Connecting to /172.21.64.30:5703, timeout: 0, bind-any: trueAug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Connecting to /172.21.64.30:5702, timeout: 0, bind-any: trueAug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Connecting to /172.21.64.29:5702, timeout: 0, bind-any: trueAug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Could not connect to: /172.21.64.30:5703. Reason: SocketException[Connection refused to address /172.21.64.30:5703]Aug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Could not connect to: /172.21.64.30:5702. Reason: SocketException[Connection refused to address /172.21.64.30:5702]Aug 20, 2019 4:58:32 PM com.hazelcast.cluster.impl.TcpIpJoinerINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Address[172.21.64.30]:5703 is added to the blacklist.Aug 20, 2019 4:58:32 PM com.hazelcast.cluster.impl.TcpIpJoinerINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Address[172.21.64.30]:5702 is added to the blacklist.Aug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Could not connect to: /172.21.64.29:5702. 
Reason: SocketException[Connection refused to address /172.21.64.29:5702]Aug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.SocketConnectorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Could not connect to: /172.21.64.29:5703. Reason: SocketException[Connection refused to address /172.21.64.29:5703]Aug 20, 2019 4:58:32 PM com.hazelcast.cluster.impl.TcpIpJoinerINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Address[172.21.64.29]:5703 is added to the blacklist.Aug 20, 2019 4:58:32 PM com.hazelcast.cluster.impl.TcpIpJoinerINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Address[172.21.64.29]:5702 is added to the blacklist.Aug 20, 2019 4:58:32 PM com.hazelcast.nio.tcp.TcpIpConnectionManagerINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Established socket connection between /172.21.64.30:50922Aug 20, 2019 4:58:38 PM com.hazelcast.cluster.ClusterServiceINFO: [172.21.64.30]:5701 [tnp] [3.5.3]Members [2] {Member [172.21.64.29]:5701Member [172.21.64.30]:5701 this}Aug 20, 2019 4:58:41 PM com.hazelcast.core.LifecycleServiceINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Address[172.21.64.30]:5701 is STARTED
此时去启动pay_lock服务时,在窗口会输出:
Aug 20, 2019 5:17:22 PM com.hazelcast.nio.tcp.SocketAcceptorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Accepting socket connection from /172.21.64.43:37455Aug 20, 2019 5:17:22 PM com.hazelcast.nio.tcp.TcpIpConnectionManagerINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Established socket connection between /172.21.64.30:5701Aug 20, 2019 5:17:22 PM com.hazelcast.client.impl.client.AuthenticationRequestINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Received auth from Connection [/172.21.64.30:5701 -> /172.21.64.43:37455], endpoint=null, live=true, type=JAVA_CLIENT, successfully authenticated, principal : ClientPrincipal{uuid='a52bcd57-2f62-44dd-996f-eb62a92447a5', ownerUuid='f71d7884-6961-40ae-93d0-daf1a3e9cb99'}, owner connection : trueAug 20, 2019 5:17:24 PM com.hazelcast.nio.tcp.SocketAcceptorINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Accepting socket connection from /172.21.64.44:40414Aug 20, 2019 5:17:24 PM com.hazelcast.nio.tcp.TcpIpConnectionManagerINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Established socket connection between /172.21.64.30:5701Aug 20, 2019 5:17:24 PM com.hazelcast.client.impl.client.AuthenticationRequestINFO: [172.21.64.30]:5701 [tnp] [3.5.3] Received auth from Connection [/172.21.64.30:5701 -> /172.21.64.44:40414], endpoint=null, live=true, type=JAVA_CLIENT, successfully authenticated, principal : ClientPrincipal{uuid='6f321cca-f60d-40b3-9a3a-5400038acc0c', ownerUuid='f71d7884-6961-40ae-93d0-daf1a3e9cb99'}, owner connection : true
[dubbo@SZ1-BS-1908-V1158 conf]$ vi dubbo.properties# 修改内容如下:dubbo.registry.address=172.21.64.48:2181,172.21.64.49:2181,172.21.64.50:2181lockServer.ip1=172.21.64.29lockServer.ip2=172.21.64.30
[dubbo@SZ1-BS-1908-V1158 bin]$ ./start.shStarting the pay_lock .....OK!PID: 23043STDOUT: logs/stdout.log
[dubbo@SZ1-BS-1908-V1156 bin]$ cat readme.txt----------------------------------------------------------脚本功能说明--------------------------------------------------------1.addtype.sh新增序列号定义2.getalloffset.sh列举所有序号当前的offset值3.getoffset.sh列举指定序号当前的offset值4.initzk.sh手动切换账务日期为当前服务器日期,在序号系统重新部署在全新环境(新zookeeper集群)时,需要执行此脚本初始化5.listconf.sh列举所有序号定义6.loadconf.sh指定序号定义文件,一次性load配置,一般在需要初次上线或者全新环境部署时对原配置进行批量迁移7.setnodeinfo.sh设置序号应用当前节点值,目前序号系统支持多中心部署,内部通过%方式确保各中心产生的序号值不重复,此处设置中心节点数、以及当前中心的%余数值8.setoffset.sh设置指定序号定义的offset值9.updtype.sh修改序列定义----------------------------------------------------------序列号新集群部署初始化-------------------------------------------------------1.对序列号服务进行新集群部署时必须进行如下两步初始化操作:initzk.shsetnodeinfo.sh2.初始化完成后进行序列号定义,可通过addtype.sh逐个添加或者通过loadconf.sh进行文件导入。
[dubbo@SZ1-BS-1908-V1156 bin]$ ./initzk.sh 172.21.64.49[08-20 11:24:23] WARN ConnectionStateManager [ConnectionStateManager-0]: There are no ConnectionStateListeners registered.
[zookeeper@HZ3-BS-1811-V834 zookeeper-3.4.10]$ bin/zkCli.shConnecting to localhost:2181[zk: localhost:2181(CONNECTED) 3] ls /idgen/node_info[0][zk: localhost:2181(CONNECTED) 4] get /idgen/node_info/02,3cZxid = 0x600000018ctime = Tue Nov 20 13:49:49 CST 2018mZxid = 0x800003c97mtime = Tue Jul 16 14:42:47 CST 2019pZxid = 0x600000018cversion = 0dataVersion = 2aclVersion = 0ephemeralOwner = 0x0dataLength = 3numChildren = 0
此处可以看到已配置的环境ZK节点起始配置为2,3,则另一节点不可以使用2
其中2指的是起始序号,3为步长,也就是序号间隔,产生的序号类似于2,5,8,11,14……
[dubbo@SZ1-BS-1908-V1156 bin]$ ./setnodeinfo.sh 172.21.64.49 1 3[08-20 11:31:05] WARN ConnectionStateManager [ConnectionStateManager-0]: There are no ConnectionStateListeners registered.
[dubbo@HZ3-BS-1811-V826 bin]$ ./listconf.sh 172.20.188.34:2181 > listConf.csv[dubbo@HZ3-BS-1811-V826 bin]$ cat listConf.csvConnect to zookeeper : 172.20.188.34:2181[08-20 11:34:28] WARN ConnectionStateManager [ConnectionStateManager-0]: There are no ConnectionStateListeners registered.1 => 1,[F:%G%m%d][SEQ],7,0,0,0,9999999,1,10002 => 2,[F:%G%m%d][SEQ],8,0,0,0,99999999,1,10003 => 3,[SEQ],10,0,0,0,9999999999,1,100,1
含义为设置的序号产生的数据格式。
1,[F:%G%m%d][SEQ],7,0,0,0,9999999,1,10002,[F:%G%m%d][SEQ],8,0,0,0,99999999,1,10003,[SEQ],10,0,0,0,9999999999,1,100,1
[dubbo@SZ1-BS-1908-V1156 bin]$ ./loadconf.sh 172.21.64.49:2181 ../init_ids.csv[08-20 14:41:36] WARN ConnectionStateManager [ConnectionStateManager-0]: There are no ConnectionStateListeners registered.
[dubbo@DEV-BS-1908-V429 bin]$ cat updtype.sh# update a ID type, $1 zookeeper cluster, $2 id, $3 new definition# e.g. addtype localhost:2181 888 888,P[F:%G-%m-%d][T:%H%M%S][SEQ],3,0,0,0,99999999,1,500# In the new grammar, %C%g will NOT work, use %G insteadjava -cp ../lib/*:../lib/pay_idgen_core-0.0.3-SNAPSHOT.jar com.lianlian.idgen.service.util.UPDConf $1 $2 $3
例如:
[dubbo@TEST-BS-1810-V059 bin]$ ./updtype.sh 192.168.132.34:2181 6 6,[F:%G%m%d][SEQ],9,0,0,0,999999999,1,1000[09-29 17:37:56] WARN ConnectionStateManager [ConnectionStateManager-0]: There are no ConnectionStateListeners registered.
部分情况下,可能需要重启服务才可以生效。
[dubbo@SZ1-BS-1908-V1156 bin]$ ./listconf.sh 172.21.64.49:2181Connect to zookeeper : 172.21.64.49:2181[08-20 14:41:44] WARN ConnectionStateManager [ConnectionStateManager-0]: There are no ConnectionStateListeners registered.1 => 1,[F:%G%m%d][SEQ],7,0,0,0,9999999,1,10002 => 2,[F:%G%m%d][SEQ],8,0,0,0,99999999,1,10003 => 3,[SEQ],10,0,0,0,9999999999,1,100,1
显示为上述格式,才说明正确完成了导入;如果显示为下面的格式,则说明在导入前,未将配置CSV文件处理为正确的格式
[dubbo@SZ1-BS-1908-V1156 bin]$ ./listconf.sh 172.21.64.49:2181Connect to zookeeper : 172.21.64.49:2181[08-20 14:36:49] WARN ConnectionStateManager [ConnectionStateManager-0]: There are no ConnectionStateListeners registered.2 => 2 => 2 => 2,[F:%G%m%d][SEQ],8,0,0,0,99999999,1,10001 => 1 => 1 => 1,[F:%G%m%d][SEQ],7,0,0,0,9999999,1,10003 => 3 => 3 => 3,[SEQ],10,0,0,0,9999999999,1,100,1
此种情况下,需要先删除已有的配置
登录ZK服务器,使用zkCli.sh登录ZK
[zookeeper@SZ1-BS-1908-V1164 zookeeper-3.4.10]$ bin/zkCli.shConnecting to localhost:21812019-08-20 14:38:57,587 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT
查看节点配置
[zk: localhost:2181(CONNECTED) 4] ls /idgen/config[2 => 2, 1 => 1, 3 => 3]
删除错误的节点配置
[zk: localhost:2181(CONNECTED) 7] delete "/idgen/config/2 => 2"[zk: localhost:2181(CONNECTED) 9] delete "/idgen/config/1 => 1"[zk: localhost:2181(CONNECTED) 10] delete "/idgen/config/3 => 3"[zk: localhost:2181(CONNECTED) 11] ls /idgen/config[]
删除了错误的节点配置之后,重启应用即可
[dubbo@SZ1-BS-1908-V1156 bin]$ ./start.shStarting the pay_serial .....OK!PID: 17378STDOUT: logs/stdout.log
直接启动即可。
[ex@DEV-BS-1908-V510 ~]$ cd /home/ex/elasticsearch-5.6.11/config[ex@DEV-BS-1908-V510 config]$ vi elasticsearch.yml# 修改内容有# Path to directory where to store the data (separate multiple locations by comma):#path.data: /home/ex/data## Path to log files:#path.logs: /home/ex/logsnetwork.host: 192.168.122.39discovery.zen.ping.unicast.hosts: ["192.168.122.39:9301", "192.168.122.40:9301"]
https://my.oschina.net/u/2510243/blog/810520?tdsourcetag=s_pcqq_aiomsg
在root用户权限下
vi /etc/sysctl.conf# 添加下列配置vm.max_map_count=655360# 然后执行命令刷新配置sysctl -p# 编辑添加文件数量限制 该文件编辑后,在重启虚拟机之后生效vim /etc/security/limits.conf# 增加软硬配置* soft nofile 81960* hard nofile 81960* soft nproc 81960* hard nproc 81960# 另需要编辑临时文件,可以即时生效* - nproc 81960# 或使用下面的命令进行设置ulimit -n 81960# 或使用下面命令直接设置为最大值ulimit -Hn# 使用如下命令查看配置是否生效[ex@TEST-BS-1908-V512 ~]$ ulimit -acore file size (blocks, -c) 0data seg size (kbytes, -d) unlimitedscheduling priority (-e) 0file size (blocks, -f) unlimitedpending signals (-i) 31312max locked memory (kbytes, -l) 64max memory size (kbytes, -m) unlimitedopen files (-n) 81960pipe size (512 bytes, -p) 8POSIX message queues (bytes, -q) 819200real-time priority (-r) 0stack size (kbytes, -s) 10240cpu time (seconds, -t) unlimitedmax user processes (-u) 81960virtual memory (kbytes, -v) unlimitedfile locks (-x) unlimited
[es@HZ3UAT-BS-1909-V083 elasticsearch-5.6.11]$ bin/elasticsearch &[1] 57897[es@HZ3UAT-BS-1909-V083 elasticsearch-5.6.11]$ Java HotSpot(TM) 64-Bit Server VM warning: INFO: os::commit_memory(0x00000001e9990000, 25071910912, 0) failed; error='Cannot allocate memory' (errno=12)## There is insufficient memory for the Java Runtime Environment to continue.# Native memory allocation (mmap) failed to map 25071910912 bytes for committing reserved memory.# An error report file with more information is saved as:# /home/es/elasticsearch-5.6.11/hs_err_pid57897.log
由于elasticsearch5.0默认分配jvm空间大小为2g,修改jvm空间分配
# vim config/jvm.options-Xms2g-Xmx2g##修改为-Xms512m-Xmx512m
[ex@TEST-BS-1908-V511 bin]$ chmod +x *[ex@TEST-BS-1908-V511 bin]$ ./elasticsearch &[1] 129732[ex@TEST-BS-1908-V511 bin]$ [2019-08-27T10:52:08,413][INFO ][o.e.n.Node ] [TNP-ES-02] initializing ...[2019-08-27T10:52:08,646][INFO ][o.e.e.NodeEnvironment ] [TNP-ES-02] using [1] data paths, mounts [[/ (/dev/mapper/VolGroup-LV_root)]], net usable_space [73.9gb], net total_space [82gb], spins? [possibly], types [ext4][2019-08-27T10:52:08,647][INFO ][o.e.e.NodeEnvironment ] [TNP-ES-02] heap size [1.9gb], compressed ordinary object pointers [true][2019-08-27T10:52:09,026][INFO ][o.e.n.Node ] [TNP-ES-02] node name [TNP-ES-02], node ID [fpG8waVmTq-QZ2L4dqhcjg][2019-08-27T10:52:09,027][INFO ][o.e.n.Node ] [TNP-ES-02] version[5.6.11], pid[129732], build[bc3eef4/2018-08-16T15:25:17.293Z], OS[Linux/2.6.32-696.el6.x86_64/amd64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_45/25.45-b02][2019-08-27T10:52:09,027][INFO ][o.e.n.Node ] [TNP-ES-02] JVM arguments [-Xms2g, -Xmx2g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -Djdk.io.permissionsUseCanonicalPath=true, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Dlog4j.skipJansi=true, -XX:+HeapDumpOnOutOfMemoryError, -Des.path.home=/home/ex/elasticsearch-5.6.11][2019-08-27T10:52:11,187][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [aggs-matrix-stats][2019-08-27T10:52:11,187][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [ingest-common][2019-08-27T10:52:11,188][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [lang-expression][2019-08-27T10:52:11,188][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [lang-groovy][2019-08-27T10:52:11,188][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module 
[lang-mustache][2019-08-27T10:52:11,189][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [lang-painless][2019-08-27T10:52:11,189][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [parent-join][2019-08-27T10:52:11,189][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [percolator][2019-08-27T10:52:11,189][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [reindex][2019-08-27T10:52:11,190][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [transport-netty3][2019-08-27T10:52:11,190][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded module [transport-netty4][2019-08-27T10:52:11,191][INFO ][o.e.p.PluginsService ] [TNP-ES-02] loaded plugin [analysis-ik][2019-08-27T10:52:14,154][INFO ][o.e.d.DiscoveryModule ] [TNP-ES-02] using discovery type [zen][2019-08-27T10:52:15,228][INFO ][o.e.n.Node ] [TNP-ES-02] initialized[2019-08-27T10:52:15,228][INFO ][o.e.n.Node ] [TNP-ES-02] starting ...
如果发生如下异常内容
[2019-08-27T10:52:58,069][WARN ][o.e.i.e.Engine ] [TNP-ES-02] [table_user_index_1][4] failed engine [failed to recover from translog]org.elasticsearch.index.engine.EngineException: failed to recover from translogat org.elasticsearch.index.engine.InternalEngine.recoverFromTranslog(InternalEngine.java:244) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.engine.InternalEngine.recoverFromTranslog(InternalEngine.java:221) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.engine.InternalEngine.recoverFromTranslog(InternalEngine.java:92) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.IndexShard.internalPerformTranslogRecovery(IndexShard.java:1033) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.IndexShard.performTranslogRecovery(IndexShard.java:987) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.StoreRecovery.internalRecoverFromStore(StoreRecovery.java:360) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromStore$0(StoreRecovery.java:90) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.StoreRecovery$$Lambda$1542/357079274.run(Unknown Source) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:257) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.StoreRecovery.recoverFromStore(StoreRecovery.java:88) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.IndexShard.recoverFromStore(IndexShard.java:1236) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$1(IndexShard.java:1484) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.IndexShard$$Lambda$1541/1402880171.run(Unknown Source) [elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:576) [elasticsearch-5.6.11.jar:5.6.11]at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_45]at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_45]at java.lang.Thread.run(Thread.java:745) [?:1.8.0_45]Caused by: java.io.EOFException: read past EOF. pos [9543183] length: [4] end: [9543183]at org.elasticsearch.common.io.Channels.readFromFileChannelWithEofException(Channels.java:101) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.translog.TranslogSnapshot.readBytes(TranslogSnapshot.java:90) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.translog.BaseTranslogReader.readSize(BaseTranslogReader.java:67) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.translog.TranslogSnapshot.readOperation(TranslogSnapshot.java:68) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.translog.TranslogSnapshot.next(TranslogSnapshot.java:61) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.translog.MultiSnapshot.next(MultiSnapshot.java:53) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.TranslogRecoveryPerformer.recoveryFromSnapshot(TranslogRecoveryPerformer.java:84) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.shard.IndexShard$IndexShardRecoveryPerformer.recoveryFromSnapshot(IndexShard.java:1838) ~[elasticsearch-5.6.11.jar:5.6.11]at org.elasticsearch.index.engine.InternalEngine.recoverFromTranslog(InternalEngine.java:242) ~[elasticsearch-5.6.11.jar:5.6.11]... 16 more
说明 data 文件夹中残留的旧节点数据(translog)已损坏且未被清理,需要先进入 data 目录找到已有的节点数据目录并将其删除。(注意:删除节点数据目录会同时丢失该节点上保存的索引数据,操作前请确认数据可以重建或已有备份)
[ex@TEST-BS-1908-V511 nodes]$ pwd/home/ex/data/nodes[ex@TEST-BS-1908-V511 nodes]$ rm -r 0/
ES 启动后,可以通过两种方式验证是否启动成功
如果返回如下内容,则认为启动成功
{"name" : "TNP-ES-03","cluster_name" : "TNP-ES","cluster_uuid" : "BBikXLspRBaTWJt-XAXADQ","version" : {"number" : "5.6.11","build_hash" : "bc3eef4","build_date" : "2018-08-16T15:25:17.293Z","build_snapshot" : false,"lucene_version" : "6.6.1"},"tagline" : "You Know, for Search"}
[dubbo@HZ3UAT-BS-1909-V085 conf]$ pwd/home/dubbo/canal/conf[dubbo@HZ3UAT-BS-1909-V085 conf]$ vi canal.properties# 修改内容canal.zkServers = 172.20.179.50:2181canal.mq.servers = 172.20.188.40:9876;172.20.188.41:9876[dubbo@SZ1-BS-1908-V1132 example]$ vi instance.properties[dubbo@SZ1-BS-1908-V1132 example]$ pwd/home/dubbo/canal/conf/example# 修改内容canal.instance.master.address=172.21.70.65:3306
server.port: 5601server.host: "192.168.122.40"elasticsearch.url: "http://192.168.122.40:9201"
[ex@DEV-BS-1908-V510 bin]$ chmod +x ../node/bin/node
[ex@DEV-BS-1908-V510 bin]$ ./kibana &[2] 130018[ex@DEV-BS-1908-V510 bin]$ log [02:38:06.457] [info][status][plugin:kibana@5.6.11] Status changed from uninitialized to green - Readylog [02:38:06.553] [info][status][plugin:elasticsearch@5.6.11] Status changed from uninitialized to yellow - Waiting for Elasticsearchlog [02:38:06.602] [info][status][plugin:console@5.6.11] Status changed from uninitialized to green - Readylog [02:38:06.652] [info][status][plugin:metrics@5.6.11] Status changed from uninitialized to green - Readylog [02:38:06.877] [info][status][plugin:elasticsearch@5.6.11] Status changed from yellow to green - Kibana index readylog [02:38:06.879] [info][status][plugin:timelion@5.6.11] Status changed from uninitialized to green - Readylog [02:38:06.885] [info][listening] Server running at http://192.168.122.40:5601log [02:38:06.887] [info][status][ui settings] Status changed from uninitialized to green - Ready
在浏览器中访问 http://192.168.122.40:5601 ,验证 Kibana 是否启动成功