@WillireamAngel
2018-06-07T01:57:32.000000Z
Storage
ceph manager: 6 cores, 12 GB RAM, 40 GB SSD002 (node 1)
ceph monitor: 6 cores, 12 GB RAM, 40 GB SSD001 (node 2)
ceph osd 0: 8 cores, 16 GB RAM, 40 GB SSD002 + 6 × 4 TB HDD001 (node 3)
ceph osd 1: 8 cores, 16 GB RAM, 40 GB SSD001 + 6 × 4 TB HDD002 (node 4)
ceph client & iscsi target: 4 cores, 8 GB RAM, 30 GB SSD001 (node 5)
10.86.1.x: cluster (public) network
10.86.101.x: OSD replication network + iSCSI mapping network
Install net-tools and wget, and apply updates:
yum install -y net-tools wget
yum update
hostnamectl set-hostname {hostname} # set the hostname (run on each node with its own name)
rm -f /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs.com/d' /etc/yum.repos.d/*.repo # remove Aliyun internal-network addresses
# Create the Ceph repo file
echo '# Aliyun Ceph repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
[ceph-source]
name=ceph-source
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/
gpgcheck=0
#'>/etc/yum.repos.d/ceph.repo
yum clean all && yum makecache # rebuild the yum cache
sudo yum install ntp ntpdate ntp-doc
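Ceph needs the clocks on all nodes to stay in sync. Assuming ntpd (rather than chronyd) is the time daemon used here, enable and start it on every node:
sudo systemctl enable ntpd
sudo systemctl start ntpd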
# Install openssh
sudo yum install openssh-server
# Create a deployment user on each node (replace {username})
ssh user@ceph-server
sudo useradd -d /home/{username} -m {username}
sudo passwd {username}
# Grant passwordless sudo to the deployment user
echo "{username} ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/{username}
sudo chmod 0440 /etc/sudoers.d/{username}
# Generate an SSH key pair on the admin node (leave the passphrase empty)
ssh-keygen
Generating public/private key pair.
Enter file in which to save the key (/ceph-admin/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /ceph-admin/.ssh/id_rsa.
Your public key has been saved in /ceph-admin/.ssh/id_rsa.pub.
# Copy the public key to each node
ssh-copy-id {username}@node1
ssh-copy-id {username}@node2
ssh-copy-id {username}@node3
ssh-copy-id {username}@node4
ssh-copy-id {username}@node5
# Set up ~/.ssh/config on the admin node
Host node1
Hostname node1
User {username}
Host node2
Hostname node2
User {username}
Host node3
Hostname node3
User {username}
Host node4
Hostname node4
User {username}
Host node5
Hostname node5
User {username}
# Edit the hosts file on every node (format: IP address, then hostname)
vi /etc/hosts
{node1-ip} node1
{node2-ip} node2
{node3-ip} node3
{node4-ip} node4
{node5-ip} node5
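For illustration only, with made-up addresses on the 10.86.1.x public network (the actual IPs are not given in this post), /etc/hosts would look like:
10.86.1.11 node1
10.86.1.12 node2
10.86.1.13 node3
10.86.1.14 node4
10.86.1.15 node5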
# Put SELinux into permissive mode (use SELINUX=disabled to turn it off completely)
sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0
sudo firewall-cmd --list-all
sudo firewall-cmd --zone=public --add-port 80/tcp --permanent
sudo firewall-cmd --zone=public --add-port 6800-7300/tcp --permanent
sudo firewall-cmd --reload
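The rules above do not open the monitor port: Ceph monitors listen on TCP 6789 by default, so on the monitor node (node2 here) that port also needs to be opened:
sudo firewall-cmd --zone=public --add-port 6789/tcp --permanent
sudo firewall-cmd --reload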
sudo yum install yum-plugin-priorities
sudo yum install ceph-deploy
All of the following steps are run on the admin (deploy) node.
# Create a working directory for the cluster configuration files
cd
mkdir my-cluster
cd my-cluster
# Create the new cluster with node2 as the monitor node
ceph-deploy new node2
# Configure the public and cluster networks in ceph.conf
vi ceph.conf
[global]
public network = 10.86.1.0/24
cluster network = 10.86.101.0/24
# Install Ceph on all nodes
ceph-deploy install node1 node2 node3 node4 node5
# Deploy the initial monitor and gather keys
ceph-deploy mon create-initial
# Push the configuration and admin keyring to all nodes
ceph-deploy admin node1 node2 node3 node4 node5
# Create the manager daemon (mgr)
ceph-deploy mgr create node2
# Create OSDs from the data disks (one command per disk; a loop sketch follows below)
ceph-deploy osd create --data /dev/sdb node3
...
...
ceph-deploy osd create --data /dev/sdg node4
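Since the same command is repeated for each data disk, a small loop on the admin node covers them all. This is only a sketch and assumes the six data disks on each OSD node are /dev/sdb through /dev/sdg:
for node in node3 node4; do
  for dev in sdb sdc sdd sde sdf sdg; do
    ceph-deploy osd create --data /dev/$dev $node
  done
done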
# Check cluster status
ssh node2 sudo ceph health
ssh node2 sudo ceph -s
ssh node2 sudo ceph osd tree
# If the configuration goes wrong, purge Ceph completely and start over
ceph-deploy purge {ceph-node} [{ceph-node}]
ceph-deploy purgedata {ceph-node} [{ceph-node}]
ceph-deploy forgetkeys
rm ceph.*
The following steps are run on the admin node.
# Create a pool (see the note on choosing the PG count after this block)
# See http://docs.ceph.com/docs/master/rados/operations/pools/#create-a-pool
ceph osd pool create rbd 512 replicated
# Set the replica count (size) to 2
ceph osd pool set rbd size 2
# Initialize the pool for use by RBD
rbd pool init rbd
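On the PG count of 512 used above: a common rule of thumb is total PGs ≈ (number of OSDs × 100) / replica size, rounded to a nearby power of two. With 12 OSDs (6 HDDs on each of node3 and node4) and size 2, that gives (12 × 100) / 2 = 600, so 512 is a reasonable value.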
The following steps are run on the client node:
rbd create foo --size 50331648 --image-feature layering [-m {mon-IP}]
sudo rbd map foo --name client.admin [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring]
# Configure automatic mapping at boot
vi /etc/ceph/rbdmap
foo id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
systemctl enable rbdmap.service
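To check that the /etc/ceph/rbdmap entry maps cleanly without rebooting, the service can also be started right away:
systemctl start rbdmap.service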
# Verify the mapping
lsblk
# Install the iSCSI target CLI
yum -y install targetcli
# Start the target service
systemctl start target
# Enable the service at boot
systemctl enable target
# Enter the targetcli shell
targetcli
# Configure the iSCSI target, ACL, LUN, and portal
# See https://blog.csdn.net/debimeng/article/details/72828780
/> /backstores/block create hope1 /dev/rbd0
/> /iscsi create iqn.2018-05.com.mwdserver:iscsimwd1
/>/iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/acls create iqn.2017-05.com.mwdinit:initmwd1
/>/iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/luns create /backstores/block/hope1
/>/iscsi/iqn.2018-05.com.mwdserver:iscsimwd1/tpg1/portals create 10.86.101.19:3260
/>cd /
/>ls
/>exit
# Enable the dashboard module
ceph mgr module enable dashboard
# Configure the mgr initial modules in ceph.conf on the monitor node
ssh node2 vi /etc/ceph/ceph.conf
[mon]
mgr initial modules = dashboard
# Configure the IP and port (open the corresponding port in the firewall)
ceph config set mgr mgr/dashboard/$name/server_addr $IP
ceph config set mgr mgr/dashboard/$name/server_port $PORT
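For example, assuming the active mgr is named node2 and the dashboard should listen on its public-network address (both values below are hypothetical), the commands would be:
ceph config set mgr mgr/dashboard/node2/server_addr 10.86.1.12
ceph config set mgr mgr/dashboard/node2/server_port 7000
# Verify the URL the dashboard is served on
ceph mgr services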
Reference:
http://docs.ceph.com/docs/mimic/mgr/dashboard/