@WillireamAngel 2018-06-07T01:57:32Z

Ceph Deployment Tutorial

Storage


Host Planning

Hardware Configuration

ceph manager: 6 cores, 12 GB RAM, 40 GB SSD (node 1)
ceph monitor: 6 cores, 12 GB RAM, 40 GB SSD (node 2)
ceph osd 0: 8 cores, 16 GB RAM, 40 GB SSD + 6 x 4 TB HDD (node 3)
ceph osd 1: 8 cores, 16 GB RAM, 40 GB SSD + 6 x 4 TB HDD (node 4)
ceph client & iscsi target: 4 cores, 8 GB RAM, 30 GB SSD (node 5)

Network Planning

10.86.1.x: public cluster network
10.86.101.x: OSD replication network + iSCSI mapping network

Features

  1. Ceph cluster: monitor, manager, osd x 2
  2. iscsi: client
  3. monitor: dashboard

Ceph Cluster Configuration

Preparation

  yum install -y net-tools wget
  yum update
  hostnamectl set-hostname {hostname}  # set each node's hostname (node1 ... node5)
  rm -f /etc/yum.repos.d/*
  wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
  wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
  sed -i '/aliyuncs.com/d' /etc/yum.repos.d/*.repo  # remove Aliyun intranet mirror entries
  # create the Ceph repo (Aliyun mirror of the Luminous packages)
  echo '#Aliyun ceph repo
  [ceph]
  name=ceph
  baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
  gpgcheck=0
  [ceph-noarch]
  name=cephnoarch
  baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
  gpgcheck=0
  [ceph-source]
  name=ceph-source
  baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/
  gpgcheck=0
  #'>/etc/yum.repos.d/ceph.repo
  yum clean all && yum makecache  # rebuild the yum cache
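To confirm the new repositories are actually active after rebuilding the cache, a quick check (the grep filter is just illustrative):

  yum repolist enabled | grep -i ceph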
  sudo yum install ntp ntpdate ntp-doc
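The packages above are only installed, not started. Assuming ntpd (rather than chronyd) is the time daemon you want, enable and verify it on every node:

  sudo systemctl enable ntpd
  sudo systemctl start ntpd
  ntpq -p  # confirm the node is syncing against its time sources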
  # install openssh
  sudo yum install openssh-server
  # create the deployment user on every node
  ssh user@ceph-server
  sudo useradd -d /home/{username} -m {username}
  sudo passwd {username}
  # grant passwordless sudo to the deployment user
  echo "{username} ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/{username}
  sudo chmod 0440 /etc/sudoers.d/{username}
  # generate an SSH key pair on the admin node, as the deployment user
  ssh-keygen
  Generating public/private rsa key pair.
  Enter file in which to save the key (/ceph-admin/.ssh/id_rsa):
  Enter passphrase (empty for no passphrase):
  Enter same passphrase again:
  Your identification has been saved in /ceph-admin/.ssh/id_rsa.
  Your public key has been saved in /ceph-admin/.ssh/id_rsa.pub.
  # distribute the public key to every node
  ssh-copy-id {username}@node1
  ssh-copy-id {username}@node2
  ssh-copy-id {username}@node3
  ssh-copy-id {username}@node4
  ssh-copy-id {username}@node5
  # set up ~/.ssh/config so ceph-deploy logs in as the deployment user
  Host node1
      Hostname node1
      User {username}
  Host node2
      Hostname node2
      User {username}
  Host node3
      Hostname node3
      User {username}
  Host node4
      Hostname node4
      User {username}
  Host node5
      Hostname node5
      User {username}
  # edit the hosts file so the short node names resolve
  vi /etc/hosts
  {node1 ip} node1
  {node2 ip} node2
  {node3 ip} node3
  {node4 ip} node4
  {node5 ip} node5
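For example, assuming the nodes sit on the 10.86.1.0/24 public network with the last octet matching the node number (these addresses are illustrative, not taken from the original plan):

  10.86.1.1  node1
  10.86.1.2  node2
  10.86.1.3  node3
  10.86.1.4  node4
  10.86.1.5  node5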
  # put SELinux into permissive mode (use SELINUX=disabled to turn it off entirely)
  sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
  grep --color=auto '^SELINUX' /etc/selinux/config
  setenforce 0  # apply immediately to the running system
  sudo firewall-cmd --list-all
  sudo firewall-cmd --zone=public --add-port=80/tcp --permanent
  sudo firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
  sudo firewall-cmd --reload
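Ports 6800-7300 cover the OSD and manager daemons, but the Ceph monitor itself listens on TCP 6789, which the commands above do not open; on the monitor node (node 2) add:

  sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent
  sudo firewall-cmd --reload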
  sudo yum install yum-plugin-priorities
  sudo yum install ceph-deploy

Creating the Cluster

All of the following is run on the admin node.

  # create a working directory for the cluster config files
  cd
  mkdir my-cluster
  cd my-cluster
  # create the monitor node
  ceph-deploy new node2
  # configure the networks
  vi ceph.conf
  [global]
  public network = 10.86.1.0/24
  cluster network = 10.86.101.0/24
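Since the plan has only two OSD hosts and the rbd pool is later set to two replicas, the default replica count can optionally be set here as well. This line is an addition of mine, not part of the original config:

  # optional: append under [global] in ceph.conf
  osd pool default size = 2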
  # install ceph on every node
  ceph-deploy install node1 node2 node3 node4 node5
  # initialize the monitor node
  ceph-deploy mon create-initial
  # distribute the config file and admin keyring
  ceph-deploy admin node1 node2 node3 node4 node5
  # create the manager (mgr) daemon on the monitor node
  ceph-deploy mgr create node2
  # create the OSDs, one per data disk
  ceph-deploy osd create --data /dev/sdb node3
  ...
  ...
  ceph-deploy osd create --data /dev/sdg node4
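The elided lines are one ceph-deploy invocation per disk. Assuming the six 4 TB HDDs on each OSD host show up as /dev/sdb through /dev/sdg (the device names are an assumption), the whole set can be scripted from the admin node:

  for host in node3 node4; do
      for dev in sdb sdc sdd sde sdf sdg; do
          ceph-deploy osd create --data /dev/$dev $host
      done
  done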
  # check the cluster status
  ssh node2 sudo ceph health
  ssh node2 sudo ceph -s
  ssh node2 sudo ceph osd tree
  # if the setup goes wrong, wipe Ceph completely and start over
  ceph-deploy purge {ceph-node} [{ceph-node}]
  ceph-deploy purgedata {ceph-node} [{ceph-node}]
  ceph-deploy forgetkeys
  rm ceph.*

Creating the Block Device and iSCSI Target

Creating the Block Device Mapping

The following is run on the admin node:

  # create the pool
  # see http://docs.ceph.com/docs/master/rados/operations/pools/#create-a-pool
  ceph osd pool create rbd 512 replicated
  # set the replica count to 2
  ceph osd pool set rbd size 2
  # initialize the pool for RBD use
  rbd pool init rbd
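The PG count of 512 follows the usual rule of thumb of roughly 100 placement groups per OSD: with 12 OSDs (six disks on each of the two OSD hosts) and a replica size of 2, (12 x 100) / 2 = 600, which rounds down to the nearest power of two, 512.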

The following is run on the client node:

  rbd create foo --size 50331648 --image-feature layering [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring]
  sudo rbd map foo --name client.admin [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring]
  # configure automatic mapping at boot
  vi /etc/ceph/rbdmap
  foo id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
  systemctl enable rbdmap.service
  # verify the mapping
  lsblk
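The rbd tool can confirm the mapping from its own side as well; the /dev/rbd0 name mentioned below is what a first mapping typically gets, not something guaranteed:

  sudo rbd showmapped   # lists image-to-device mappings, e.g. foo mapped to /dev/rbd0
  rbd info foo          # size and features of the image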

Creating the iSCSI Target

  # install the iSCSI target tooling
  yum -y install targetcli
  # start the target service
  systemctl start target
  # enable it at boot
  systemctl enable target
  # enter the targetcli configuration shell
  targetcli
  # configure the iSCSI target, ACL, LUN, and portal
  # see https://blog.csdn.net/debimeng/article/details/72828780
  /> /backstores/block create hope1 /dev/rbd0
  /> /iscsi create iqn.2018-05.com.mwdserver:iscsimwd1
  /> /iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/acls create iqn.2017-05.com.mwdinit:initmwd1
  /> /iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/luns create /backstores/block/hope1
  /> /iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/portals create 10.86.101.19:3260
  /> cd /
  /> ls
  /> exit
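On the machine that will consume the LUN (not covered above), assuming the open-iscsi initiator is installed and its InitiatorName in /etc/iscsi/initiatorname.iscsi matches the ACL created above, discovery and login against the portal look like this:

  yum -y install iscsi-initiator-utils
  iscsiadm -m discovery -t sendtargets -p 10.86.101.19
  iscsiadm -m node -T iqn.2018-05.com.mwdserver:iscsimwd1 -p 10.86.101.19:3260 --login
  lsblk  # the exported LUN appears as a new disk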

Setting Up the Dashboard

  # enable the dashboard module
  ceph mgr module enable dashboard
  # configure the initial mgr modules in the monitor section
  ssh node2 vi /etc/ceph/ceph.conf
  [mon]
  mgr initial modules = dashboard
  # set the dashboard IP and port (open the port in the firewall as well)
  ceph config set mgr mgr/dashboard/$name/server_addr $IP
  ceph config set mgr mgr/dashboard/$name/server_port $PORT
  Reference: http://docs.ceph.com/docs/mimic/mgr/dashboard/
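Once the mgr is serving the module, the URL it listens on can be read back from the cluster; the exact port depends on the release and on the server_port set above:

  ceph mgr services  # prints the service map, e.g. {"dashboard": "http://node2:7000/"}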
