@WillireamAngel
2018-06-07T01:57:32.000000Z
Storage
ceph manager: 6 cores, 12 GB RAM, 40 GB SSD (node 1)
ceph monitor: 6 cores, 12 GB RAM, 40 GB SSD (node 2)
ceph osd 0: 8 cores, 16 GB RAM, 40 GB SSD + 6 x 4 TB HDD (node 3)
ceph osd 1: 8 cores, 16 GB RAM, 40 GB SSD + 6 x 4 TB HDD (node 4)
ceph client & iscsi target: 4 cores, 8 GB RAM, 30 GB SSD (node 5)
10.86.1.x: cluster public network
10.86.101.x: OSD replication network + iSCSI mapping network
Install `net-tools` and `wget`, apply the latest updates, and set each node's hostname:
yum install -y net-tools wget
yum update
hostname  # change the hostname (see the sketch below for a persistent way)
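A minimal way to set the names persistently, assuming the node1..node5 naming from the hardware layout above:
hostnamectl set-hostname node1  # run on the first machine; use node2..node5 on the others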
rm -f /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs.com/d' /etc/yum.repos.d/*.repo  # drop Aliyun-internal addresses
# Create the Ceph repo (Aliyun mirror)
cat > /etc/yum.repos.d/ceph.repo << 'EOF'
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
[ceph-source]
name=ceph-source
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/
gpgcheck=0
EOF
yum clean all && yum makecache  # rebuild the yum cache
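An optional check that the new mirrors are active:
yum repolist enabled | grep -i ceph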
sudo yum install ntp ntpdate ntp-doc
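Starting and enabling the daemon keeps the monitors' clocks in sync; a quick sketch:
systemctl enable ntpd
systemctl start ntpd
ntpq -p  # verify that time peers are reachable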
# Install OpenSSH
sudo yum install openssh-server
# Create a deployment user (log in to each node first)
ssh user@ceph-server
sudo useradd -d /home/{username} -m {username}
sudo passwd {username}
# Grant the user passwordless sudo
echo "{username} ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/{username}
sudo chmod 0440 /etc/sudoers.d/{username}
# Generate an SSH key pair (leave the passphrase empty)
ssh-keygen
Generating public/private key pair.
Enter file in which to save the key (/ceph-admin/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /ceph-admin/.ssh/id_rsa.
Your public key has been saved in /ceph-admin/.ssh/id_rsa.pub.
# Distribute the public key to every node
ssh-copy-id {username}@node1
ssh-copy-id {username}@node2
ssh-copy-id {username}@node3
ssh-copy-id {username}@node4
ssh-copy-id {username}@node5
# Set up ~/.ssh/config
Host node1
    Hostname node1
    User {username}
Host node2
    Hostname node2
    User {username}
Host node3
    Hostname node3
    User {username}
Host node4
    Hostname node4
    User {username}
Host node5
    Hostname node5
    User {username}
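With the config in place, passwordless access can be verified from the admin node in one loop:
for n in node1 node2 node3 node4 node5; do ssh "$n" hostname; done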
# Edit the hosts file (format: <ip> <hostname>)
vi /etc/hosts
{node1-ip} node1
{node2-ip} node2
{node3-ip} node3
{node4-ip} node4
{node5-ip} node5
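For illustration only, assuming addresses on the 10.86.1.x cluster network (substitute the real ones):
10.86.1.11 node1
10.86.1.12 node2
10.86.1.13 node3
10.86.1.14 node4
10.86.1.15 node5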
# Put SELinux into permissive mode
sed -i '/^SELINUX=.*/c SELINUX=permissive' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0
# Open the web and Ceph daemon ports
sudo firewall-cmd --list-all
sudo firewall-cmd --zone=public --add-port=80/tcp --permanent
sudo firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
sudo firewall-cmd --reload
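The monitor also listens on 6789/tcp; if that port is not already open, add it the same way before reloading:
sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent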
sudo yum install yum-plugin-priorities
sudo yum install ceph-deploy
All of the following steps are run on the admin node.
# Create the configuration directory
cd
mkdir my-cluster
cd my-cluster
# Create the monitor node
ceph-deploy new node2
# Configure the networks
vi ceph.conf
[global]
public network = 10.86.1.0/24
cluster network = 10.86.101.0/24
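After `ceph-deploy new`, the file already holds a generated fsid and the monitor entries; with the two network lines added it looks roughly like this (fsid and IP are placeholders):
[global]
fsid = {generated-uuid}
mon_initial_members = node2
mon_host = {node2-ip}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 10.86.1.0/24
cluster network = 10.86.101.0/24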
# Install Ceph on all nodes
ceph-deploy install node1 node2 node3 node4 node5
# Initialize the monitor and gather the keys
ceph-deploy mon create-initial
# Distribute the config file and admin keyring
ceph-deploy admin node1 node2 node3 node4 node5
# Create the mgr (manager) daemon
ceph-deploy mgr create node2
# Create the OSDs, one command per data disk (see the loop sketch below)
ceph-deploy osd create --data /dev/sdb node3
......
ceph-deploy osd create --data /dev/sdg node4
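The elided commands simply repeat over each data disk; a loop sketch, assuming disks /dev/sdb through /dev/sdg on both OSD nodes:
for node in node3 node4; do
  for disk in /dev/sd{b..g}; do
    ceph-deploy osd create --data "$disk" "$node"
  done
done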
# Check the cluster status
ssh node2 sudo ceph health
ssh node2 sudo ceph -s
ssh node2 sudo ceph osd tree
# If the configuration goes wrong, remove Ceph completely and start over
ceph-deploy purge {ceph-node} [{ceph-node}]
ceph-deploy purgedata {ceph-node} [{ceph-node}]
ceph-deploy forgetkeys
rm ceph.*
The following steps are run on the admin node.
# Create a pool
# See http://docs.ceph.com/docs/master/rados/operations/pools/#create-a-pool
ceph osd pool create rbd 512 replicated
# Set the replica size to 2
ceph osd pool set rbd size 2
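The count of 512 follows the usual placement-group rule of thumb, (total OSDs x 100) / replica size, rounded to a power of two:
# (12 OSDs x 100) / 2 replicas = 600 -> nearest power of two not above it: 512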
# Initialize the pool for use by RBD
rbd pool init rbd
The following steps are run on the client node:
# Create an RBD image
rbd create foo --size 50331648 --image-feature layering [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring]
sudo rbd map foo --name client.admin [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring]
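To confirm the image is attached, `rbd showmapped` lists the mapped device (typically /dev/rbd0):
sudo rbd showmapped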
# Configure automatic mapping at boot
vi /etc/ceph/rbdmap
rbd/foo id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
systemctl enable rbdmap.service
# Check the mapping result
lsblk
# Install the iSCSI target CLI
yum -y install targetcli
# Start the target service
systemctl start target
# Enable it at boot
systemctl enable target
# Enter the targetcli shell
targetcli
# Configure the iSCSI target and its access permissions
# See https://blog.csdn.net/debimeng/article/details/72828780
/> /backstores/block create hope1 /dev/rbd0
/> /iscsi create iqn.2018-05.com.mwdserver:iscsimwd1
/> /iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/acls create iqn.2017-05.com.mwdinit:initmwd1
/> /iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/luns create /backstores/block/hope1
/> /iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/portals create 10.86.101.19:3260
/> cd /
/> ls
/> exit
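On the consuming host (not covered above), a minimal initiator sketch; it assumes the initiator IQN matches the ACL created in targetcli:
yum -y install iscsi-initiator-utils
# The initiator name must match the ACL entry on the target
echo "InitiatorName=iqn.2017-05.com.mwdinit:initmwd1" > /etc/iscsi/initiatorname.iscsi
iscsiadm -m discovery -t sendtargets -p 10.86.101.19
iscsiadm -m node -T iqn.2018-05.com.mwdserver:iscsimwd1 -p 10.86.101.19 --login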
# Enable the dashboard module
ceph mgr module enable dashboard
# Configure the module on the monitor node
ssh node2
vi /etc/ceph/ceph.conf
[mon]
mgr initial modules = dashboard
# Set the IP and port (open the matching port in the firewall as well)
ceph config set mgr mgr/dashboard/$name/server_addr $IP
ceph config set mgr mgr/dashboard/$name/server_port $PORT
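For example, with illustrative values ($name is the id of the active mgr, node2 in this setup; address and port are placeholders):
ceph config set mgr mgr/dashboard/node2/server_addr 10.86.1.12
ceph config set mgr mgr/dashboard/node2/server_port 7000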
Reference: http://docs.ceph.com/docs/mimic/mgr/dashboard/