Ceph Mimic: Storage Cluster Setup on CentOS 7
I. Basic Linux host setup
# Set the hostname on each host
hostnamectl set-hostname <hostname>
[root@node0 ~]# cat << EOF >> /etc/hosts
192.168.10.14 ceph-deploy
192.168.10.13 ceph-mon0
192.168.10.12 node2
192.168.10.11 node1
192.168.10.10 node0
EOF
# Test connectivity with ping
[ceph@ceph-deploy ~]$ for i in {ceph-mon0,node1,node2,node0,ceph-deploy} ;do ping -c1 $i ;done
Configure the Ceph yum repository (mirror: http://mirrors.163.com/ceph/):
# Ceph repo file
[root@ceph-deploy ~]# cat /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.163.com/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.163.com/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.163.com/ceph/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.163.com/ceph/keys/release.asc
priority=1
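After writing the repo file, refreshing the yum metadata is a reasonable extra step so the new repo is picked up (not strictly required):
[root@ceph-deploy ~]# yum clean all && yum makecache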
Disable the firewall (or open port 6789 plus the 6800~6900 range) and disable SELinux:
setenforce 0
# Change the SELinux mode persistently in the config files
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Firewall settings
$ sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent
# Or disable the firewall entirely
$ sudo systemctl stop firewalld.service # stop firewalld
$ sudo systemctl disable firewalld.service # keep firewalld from starting at boot
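If firewalld is left running, the OSD/mgr port range mentioned above also needs to be opened; a minimal sketch (6800-7300/tcp is the default range Ceph daemons bind to, which covers the 6800~6900 span above):
$ sudo firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
$ sudo firewall-cmd --reload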
Configure the NTP service so that time stays consistent across all cluster servers:
yum install ntp ntpdate ntp-doc
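A minimal sketch of bringing the time service up (pool.ntp.org is only a placeholder; substitute your own NTP source if you have one):
ntpdate -u pool.ntp.org
systemctl enable ntpd
systemctl start ntpd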
System tuning
#set max user processes
sed -i 's/4096/102400/' /etc/security/limits.d/20-nproc.conf
#set ulimit
grep "ulimit -SHn 102400" /etc/rc.local|| echo "ulimit -SHn 102400" >> /etc/rc.local
# Raise the maximum number of open file handles
grep "^* - sigpending 256612" /etc/security/limits.conf ||
cat >>/etc/security/limits.conf<<EOF
* soft nofile 102400
* hard nofile 102400
* soft nproc 102400
* hard nproc 102400
* - sigpending 256612
EOF
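These limits only apply to new login sessions; after logging in again they can be checked with:
ulimit -n # expect 102400
ulimit -u # expect 102400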
Create the Ceph deployment user: the ceph-deploy tool must log in to the Ceph nodes as a regular (non-root) user.
# Run the following on every Ceph cluster node
# Create the dedicated ceph user
$ sudo useradd -d /home/ceph -m ceph
$ echo "123456" | sudo passwd --stdin ceph
# Grant passwordless sudo
echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
sudo chmod 0440 /etc/sudoers.d/ceph
# Set up passwordless SSH trust between the hosts
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
ssh-copy-id <hostname>
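For example, run as the ceph user on the deploy node, pushing the key to every host from the /etc/hosts list above:
[ceph@ceph-deploy ~]$ for i in ceph-mon0 node0 node1 node2 ceph-deploy; do ssh-copy-id ceph@$i; done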
# Edit ~/.ssh/config on the ceph-deploy admin node so ceph-deploy logs in as the ceph user
[ceph@ceph-deploy ~]$ cat .ssh/config
Host node1
Hostname node1
User ceph
Host node2
Hostname node2
User ceph
Host node0
Hostname node0
User ceph
Host ceph-mon0
Hostname ceph-mon0
User ceph
Host ceph-deploy
Hostname ceph-deploy
User ceph
Note: tighten the file permissions with chmod 600 ~/.ssh/config
# Test
[ceph@ceph-deploy ~]$ for i in {ceph-mon0,node1,node2,node0,ceph-deploy} ;do ssh $i hostname;done
ceph-mon0
node1
node2
node0
ceph-deploy
II. Ceph storage cluster setup
1. Create the cluster: ceph-deploy new ceph-mon0
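If ceph-deploy itself is not installed yet, install it from the repo configured above and work from a dedicated directory (the ceph-cluster directory seen in the output below); a sketch:
[ceph@ceph-deploy ~]$ sudo yum install -y ceph-deploy
[ceph@ceph-deploy ~]$ mkdir ceph-cluster && cd ceph-cluster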
# Error 1:
[ceph@ceph-deploy ~]$ ceph-deploy new ceph-mon0
Traceback (most recent call last):
File "/bin/ceph-deploy", line 18, in <module>
from ceph_deploy.cli import main
File "/usr/lib/python2.7/site-packages/ceph_deploy/cli.py", line 1, in <module>
import pkg_resources
ImportError: No module named pkg_resources
# Fix: install python2-pip to restore the missing pkg_resources module
[ceph@ceph-deploy ceph-cluster]$ sudo yum install python2-pip
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy new ceph-mon0
...output omitted...
[ceph_deploy.new][DEBUG ] Resolving host ceph-mon0
[ceph_deploy.new][DEBUG ] Monitor ceph-mon0 at 192.168.10.13
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph-mon0']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.10.13']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
[ceph@ceph-deploy ceph-cluster]$ ls # list the generated files
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
Since this test cluster has only a few OSD nodes, lower the default replica count:
osd pool default size = 2
[ceph@ceph-deploy ceph-cluster]$ cat ceph.conf
[global]
fsid = 9c7c907b-dcbe-4ab1-b103-17f9d8aa2c2d
mon_initial_members = ceph-mon0
mon_host = 192.168.10.13
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd pool default size = 2 # added
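Optionally, the same [global] section is where the cluster's network can be pinned as well; a sketch assuming the 192.168.10.0/24 subnet used in this setup:
public network = 192.168.10.0/24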
2. Install Ceph: use ceph-deploy to install Ceph on all nodes
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy install ceph-deploy ceph-mon0 node0 node1 node2
# ...output omitted...
[node2][DEBUG ] Complete!
[node2][INFO ] Running command: sudo ceph --version
[node2][DEBUG ] ceph version 13.2.1 (5533ecdc0fda920179d7ad84e0aa65a127b20d77) mimic (stable)
# ceph-deploy installs Ceph on each of the listed nodes.
# Note: if you have run ceph-deploy purge, you must repeat this step to reinstall Ceph.
3. Bootstrap the initial monitor(s) and gather all keys
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy mon create-initial
[ceph_deploy.gatherkeys][INFO ] Storing ceph.client.admin.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mgr.keyring
[ceph_deploy.gatherkeys][INFO ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO ] Destroy temp directory /tmp/tmp7RWLGd
# After this completes, the current directory should contain these keyrings
[ceph@ceph-deploy ceph-cluster]$ ll
total 320
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-mds.keyring
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-mgr.keyring
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-osd.keyring
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-rgw.keyring
-rw------- 1 ceph ceph 151 Sep 4 23:25 ceph.client.admin.keyring
-rw-rw-r-- 1 ceph ceph 225 Sep 4 22:59 ceph.conf
-rw-rw-r-- 1 ceph ceph 295037 Sep 4 23:25 ceph-deploy-ceph.log
-rw------- 1 ceph ceph 73 Sep 4 22:53 ceph.mon.keyring
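At this point the quick-start flow usually also copies the config and admin keyring out to the nodes, so the ceph CLI (for example the sudo ceph -s calls below) works on them; a sketch using the node names above:
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy admin ceph-deploy ceph-mon0 node0 node1 node2
# on each node, make the keyring readable for the ceph user:
sudo chmod +r /etc/ceph/ceph.client.admin.keyring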
4. Create the manager (mgr) daemons
# ceph-deploy mgr create ceph-mon0 ceph-deploy node0 node1 node2
# ceph -s
5. Create the OSDs
# List the disks on the OSD nodes
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy disk list node0 node1 node2
[root@node1 ~]# lsblk
sdb 8:16 0 20G 0 disk
└─ceph--f805b345--30ad--4006--86e8--f00aac00eeb5-osd--block--068c7bab--2130--4862--8693--3e54f6b0357c 253:2 0 20G 0 lvm
# ceph-deploy osd create --data /dev/sdb node1
# ceph-deploy osd create --data /dev/sdb node2
# ceph-deploy osd create --data /dev/sdb node0
# ceph -s
[ceph@ceph-deploy ceph-cluster]$ sudo ceph -s
cluster:
id: 8345c764-cc94-402f-83f7-d4db29d79f89
health: HEALTH_WARN
no active mgr
services:
mon: 2 daemons, quorum ceph-mon0,ceph-deploy
mgr: ceph-mon0(active), standbys: ceph-deploy, node0, node1, node2
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
# Creation complete
[ceph@ceph-deploy ceph-cluster]$ sudo ceph health
HEALTH_OK
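A per-OSD view is also available to confirm all three OSDs are up:
[ceph@ceph-deploy ceph-cluster]$ sudo ceph osd tree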
III. Enable the dashboard
# Enable the dashboard module:
$ sudo ceph mgr module enable dashboard
By default, all HTTP connections to the dashboard are secured with SSL/TLS.
To get the dashboard up and running quickly, generate and install a self-signed certificate with the built-in command:
$ sudo ceph dashboard create-self-signed-cert
Self-signed certificate created
Create a user with the administrator role:
$ sudo ceph dashboard set-login-credentials admin admin
Username and password updated
Check the ceph-mgr services:
By default the dashboard daemon (i.e. the currently active manager) binds to TCP port 8443, or 8080 when SSL is disabled.
[ceph@ceph-mon0 ~]$ sudo ceph mgr services
{
"dashboard": "https://ceph-mon0:8443/"
}
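If the dashboard should listen on a specific address or port, both can be set through the mgr configuration (the address below is simply the monitor IP from this setup):
$ sudo ceph config set mgr mgr/dashboard/server_addr 192.168.10.13
$ sudo ceph config set mgr mgr/dashboard/server_port 8443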
Enable the Prometheus module (for integration with Grafana):
[ceph@ceph-mon0 ~]$ sudo ceph mgr module enable prometheus
[ceph@ceph-mon0 ~]$ ss -tlnp |grep 9283
LISTEN 0 5 :::9283 :::* users:(("ceph-mgr",pid=43370,fd=74))
[ceph@ceph-mon0 ~]$ sudo ceph mgr services
{
"dashboard": "https://ceph-mon0:8443/",
"prometheus": "http://ceph-mon0:9283/"
}
Install Prometheus:
# tar -zxvf prometheus-*.tar.gz
# cd prometheus-*
# cp prometheus promtool /usr/local/bin/
# prometheus --version
prometheus, version 2.3.2 (branch: HEAD, revision: 71af5e29e815795e9dd14742ee7725682fa14b7b)
build user: root@5258e0bd9cc1
build date: 20180712-14:02:52
go version: go1.10.3
# mkdir /etc/prometheus && mkdir /var/lib/prometheus
# vim /usr/lib/systemd/system/prometheus.service ### create the systemd unit
[Unit]
Description=Prometheus
Documentation=https://prometheus.io
[Service]
Type=simple
WorkingDirectory=/var/lib/prometheus
ExecStart=/usr/local/bin/prometheus \
--config.file /etc/prometheus/prometheus.yml \
--storage.tsdb.path /var/lib/prometheus/
[Install]
WantedBy=multi-user.target
# vim /etc/prometheus/prometheus.yml ## Prometheus configuration file
global:
  scrape_interval: 15s
  evaluation_interval: 15s
scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['192.168.100.116:9090']
  - job_name: 'ceph'
    static_configs:
      - targets:
        - 192.168.100.116:9283
        - 192.168.100.117:9283
        - 192.168.100.118:9283
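Before starting the service, the file can be validated with the promtool binary copied to /usr/local/bin earlier:
# promtool check config /etc/prometheus/prometheus.yml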
# systemctl daemon-reload
# systemctl start prometheus
# systemctl status prometheus
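To have Prometheus start automatically after a reboot:
# systemctl enable prometheus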