@GEEKO
2018-12-13T09:14:29.000000Z
字数 6133
阅读 436
Docker
# Show full metadata for the "mysql" container (mounts, env, network, ...).
docker inspect mysql
# Delete all dangling volumes (volumes not referenced by any container).
docker volume rm $(docker volume ls -qf dangling=true)
# Remove the mysql container and (-v) its anonymous volumes.
docker rm -v mysql
# Start mysql2 sharing every volume of the old mysql container,
# so it reuses the same /var/lib/mysql data (and therefore credentials).
docker run --name mysql2 --volumes-from mysql -d mysql:5.7
推送到私有镜像
# Step 1: tag the local image with the private registry's host:port prefix.
1、docker tag identity:latest registry.jinri.cn:50000/identity:latest
# Step 2: push the tagged image to the private registry.
2、docker push registry.jinri.cn:50000/identity:latest
修改镜像名称
# Re-tag a registry-prefixed image back to a plain local name.
docker tag registry.jinri.cn/identity:latest identity:latest
# Run the identity app: host /app bind-mounted as the container's
# configuration directory, host port 58316 -> container port 80.
docker run --name app.identity4 -v /app:/app/Configuration -p 58316:80 -d de66b7184794
docker inspect
mysql2发现,mysql2的/var/lib/mysql和mysql的/var/lib/mysql都挂载到了相同的目录下,因此在mysql2中可以复用mysql的数据:
"Mounts": [
{
"Name": "8827c361d103c1272907da0b82268310415f8b075b67854f27dbca0b59a31a1a",
"Source": "/mnt/sda1/var/lib/docker/volumes/8827c361d103c1272907da0b82268310415f8b075b67854f27dbca0b59a31a1a/_data",
"Destination": "/var/lib/mysql",
"Driver": "local",
"Mode": "",
"RW": true,
"Propagation": ""
} ]
值得注意的是,虽然/var/lib/mysql没有在run的指令中出现,但其出现在了生成mysql:5.7镜像的Dockerfile中,所以即使在run时忘记使用volume,该目录依然能够从宿主机直接访问,官方真是用心良苦啊。另外,由于复用了mysql的所有数据,因此连接数据库用户名密码也和mysql一样,而不会是run时传入的参数,一切都和原来mysql中的一样。
# Move the docker daemon's data root to /gkdata/docker (legacy -g flag).
DOCKER_OPTS="-g /gkdata/docker"
# Interactive Ubuntu shell; "-t -i" and "-it" are equivalent spellings.
docker run -t -i ubuntu /bin/bash
docker run -it ubuntu bash
# Live resource usage (CPU/memory/network/IO) of running containers.
docker stats
如果不想持续的监控容器使用资源的情况,可以通过 --no-stream 选项只输出当前的状态
# One-shot snapshot of container resource usage (no live refresh).
docker stats --no-stream
# Extract the container's IP address from its inspect output.
docker inspect elasticsearch |grep IPAddress
docker pull mongo
# MongoDB with authentication enabled (--auth is passed through to mongod).
docker run --name some-mongo -p 27017:27017 -d mongo --auth
//--name 需放在镜像名之前,并用 -p 映射端口;随后可通过 docker exec -it 容器ID /bin/bash 进入容器
//进入容器
// Open the mongo shell inside the container and create a root user.
mongo
use admin
db.createUser({user:"root",pwd:"root",roles:[{role:'root',db:'admin'}]}) //create the root user; once it exists, all later operations require authentication
exit
docker pull mysql
# MySQL container: config, data and log directories bind-mounted from the host.
# NOTE(review): the leading "//gkdata/..." double slash looks like a typo for
# "/gkdata/..." (or a Git-Bash path-conversion workaround) — confirm.
docker run --name mysql \
-p 3306:3306 \
-v //gkdata/config/mysql/conf.d:/etc/mysql/conf.d \
-v //gkdata/data/mysql:/var/lib/mysql \
-v //gkdata/logs/mysql/:/var/log \
-e MYSQL_ROOT_PASSWORD=123456 \
--privileged=true \
-d mysql
# Equivalent single-line form, with host paths under /var/mysql.
docker run --name mysql -p 3306:3306 -v /var/mysql/data:/var/lib/mysql -v /var/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=123456 --privileged=true -d mysql
配置文件:my.conf
# my.cnf mounted into the container at /etc/mysql/conf.d
[mysqldump]
user=root
password=123456
[mysqld]
max_allowed_packet=8M
lower_case_table_names=1
character_set_server=utf8
max_connections=900
max_connect_errors=600
# FIX: "default-character-set" is a client-only option; placed under
# [mysqld] the server refuses to start ("unknown variable"). The
# server-side equivalent, character_set_server, is already set above;
# keep the client default in a [client] section instead.
[client]
default-character-set=utf8
问题: mysql client does not support authentication protocol
解决方法:
1、进入容器打开数据库:mysql -h192.168.2.15 -P3306 -uroot -p (注意端口参数是大写 -P;小写 -p3306 会被当作密码 "3306")
2、修改密码:alter user 'root'@'%' identified with mysql_native_password by '123456';
问题:authentication plugin 'caching_sha2_password' -navicat连接异常问题解决
解决方案:
# Enter the mysql container and connect with the client.
docker exec -it mysql bash
# FIX: the port flag is uppercase -P; lowercase "-p3306" is parsed as the
# password "3306". The trailing -p prompts for the real password.
mysql -h192.168.3.116 -P3306 -uroot -p
# Switch root back to the legacy auth plugin (caching_sha2_password is not
# supported by older clients such as Navicat).
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '新密码';
docker run -p 6379:6379 --name redis -v /wdc/docker/redis/redis.conf:/etc/redis/redis.conf -v /wdc/docker/redis/data:/data -d --restart=always redis redis-server /etc/redis/redis.conf --appendonly yes --requirepass "Zhengbolove@621218%"
redis-server --appendonly yes : 在容器内执行 redis-server 启动命令,并打开 redis 持久化(AOF)配置
--requirepass "your passwd" : 设置认证密码
--restart=always : 随 docker 启动而自动重启
# Step 1: user-defined bridge network so the containers resolve each other by name.
1、docker network create proget
2、
# Step 2: PostgreSQL backend for ProGet; host timezone mounted read-only,
# data persisted on the host, restart unless explicitly stopped.
docker run -d \
-v /etc/localtime:/etc/localtime:ro \
-v /home/jesn/docker/postgresql/db:/var/lib/postgresql/data \
--net=proget --name=proget-postgres --restart=unless-stopped postgres:9.5
3、
# Step 3: ProGet itself, on the same network, web UI on host port 8059.
docker run -d \
-v /etc/localtime:/etc/localtime:ro \
-v /home/jesn/docker/proget/packages:/var/proget/packages \
-v /home/jesn/docker/proget/extensions:/var/proget/extensions -p 8059:80 \
--net=proget --name=proget \
--restart=unless-stopped inedo/proget:latest
# InfluxDB: admin UI on 8083, HTTP API on 8086, credentials via env vars,
# data persisted under /home/jesn/docker/influxdb.
# FIX: the INFLUXDB_INIT_PWD line was missing its trailing "\", which ended
# the command early and silently dropped the -v/--name/image arguments.
docker run -d \
-p 8083:8083 \
-p 8086:8086 \
-e ADMIN_USER="root" \
-e INFLUXDB_INIT_PWD="123456" \
-v /home/jesn/docker/influxdb/:/data \
--name influxdb \
tutum/influxdb:latest
# Elasticsearch: data, plugins and config bind-mounted from the host;
# JVM heap bounded (256 MB min / 2 GB max); REST on 9200, transport on 9300.
docker run -d --name elasticsearch \
-v /wdc/docker/elasticsearch/data:/usr/share/elasticsearch/data \
-v /wdc/docker/elasticsearch/plugins:/usr/share/elasticsearch/plugins \
-v /wdc/docker/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-e ES_JAVA_OPTS="-Xms256m -Xmx2g" \
-p 9200:9200 -p 9300:9300 elasticsearch
elasticsearch.yml文件内容
http.host: 0.0.0.0
#Uncomment the following lines for a production cluster deployment
#transport.host: 0.0.0.0
#discovery.zen.minimum_master_nodes: 1
# Kibana pointed at the elasticsearch instance above, UI on port 5601.
docker run -d --name kibana -e ELASTICSEARCH_URL=http://192.168.3.116:9200 -p 5601:5601 kibana
# Logstash reading its pipeline config from the current directory.
docker run -d --expose 5044 -p 5044:5044 --name logstash -v "$PWD":/config-dir logstash -f /config-dir/logstash.conf
# 新建logstash.conf文件
# Accept events from Beats shippers (e.g. filebeat) on port 5044.
input {
beats {
port => 5044
}
}
output {
# Echo each event to stdout (debug formatting) for troubleshooting.
stdout {
codec => rubydebug
}
elasticsearch {
# Use the elasticsearch host IP reachable from inside the container
# (LAN/public IP); 127.0.0.1/localhost would point at the logstash
# container itself.
hosts => ["192.168.3.116:9200"]
}
}
docker run -v filebeat.yml:/filebeat.yml prima/filebeat
filebeat.inputs:
encoding: GB2312
## 单节点部署
docker run -d -p 8500:8500 -p 8300:8300 -h node1 --name consul-1 consul agent -server -bootstrap-expect=1 -node=node1 -client 0.0.0.0 -ui
# Custom Jenkins image: host-docker group access plus docker-compose.
FROM jenkins
USER root
# Replace the base image's apt sources with the Aliyun Debian jessie mirrors.
# (If Aliyun is unreachable in your environment, substitute another mirror.)
RUN echo '' > /etc/apt/sources.list.d/jessie-backports.list \
    && echo "deb http://mirrors.aliyun.com/debian jessie main contrib non-free" > /etc/apt/sources.list \
    && echo "deb http://mirrors.aliyun.com/debian jessie-updates main contrib non-free" >> /etc/apt/sources.list \
    && echo "deb http://mirrors.aliyun.com/debian-security jessie/updates main contrib non-free" >> /etc/apt/sources.list
# Update the package index and install libltdl7 (needed by the docker CLI).
# (Dropped the redundant trailing "apt-get update" the original had here.)
RUN apt-get update && apt-get install -y libltdl7
# Create a "docker" group whose GID matches the host's docker group so the
# jenkins user can use the bind-mounted /var/run/docker.sock.
ARG dockerGid=999
RUN echo "docker:x:${dockerGid}:jenkins" >> /etc/group
# Install docker-compose (needed by builds run inside Jenkins).
# FIX: a bare "curl ..." line is not a valid Dockerfile instruction — it
# must be part of a RUN.
RUN curl -L "https://github.com/docker/compose/releases/download/1.22.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
    && chmod +x /usr/local/bin/docker-compose
# docker-compose install reference:
# https://docs.docker.com/compose/install/#where-to-go-next
# Verify the install with:
# docker-compose --version
# Build the custom Jenkins image from the Dockerfile above.
docker build -t mkm-jenkins .
# Jenkins: web UI on 8085, agent port 50000; the host's docker socket and
# CLI binary are mounted so jobs inside Jenkins can drive the host docker.
docker run --name jenkins -p 8085:8080 -p 50000:50000 -v /var/run/docker.sock:/var/run/docker.sock -v /usr/bin/docker:/usr/bin/docker -v /wdc/docker/jenkins/jenkins_home:/var/jenkins_home -d d1cf34ad7a7f
# Web UI for managing docker via the mounted socket, on host port 9500.
docker run -it -d --name dockerUI -p 9500:9000 -v /var/run/docker.sock:/var/run/docker.sock docker.io/uifd/ui-for-docker