Installation steps:
mkdir -p /etc/ceph/ /var/lib/ceph/
#disable selinux (turned off for this test environment; revisit before production use)
vi /etc/selinux/config
SELINUX=disabled
setenforce 0
systemctl stop firewalld
systemctl disable firewalld
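A quick check (optional) that the changes took effect: getenforce should report Permissive (or Disabled after a reboot) and firewalld should be inactive.
getenforce
systemctl is-active firewalld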
mkdir -p /mnt/cephfs
#install docker and docker-compose
yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine
yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
systemctl start docker
systemctl enable docker
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
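A version check (optional) confirms both tools are installed and on the PATH:
docker --version
docker-compose --version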
vi /etc/docker/daemon.json
{
"insecure-registries": ["<your_IP or hostname>"]
}
systemctl restart docker
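To confirm Docker picked up the registry setting (optional), docker info should list it under Insecure Registries:
docker info | grep -A 3 'Insecure Registries'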
#download ceph docker image
docker login <private repository> -u admin
docker pull <private repository>/ceph/daemon:latest-luminous
#docker pull ceph/daemon:latest-luminous
mkdir /root/ceph
cd /root/ceph
#clear out any previous installation settings
rm -rf /etc/ceph/* && rm -rf /var/lib/ceph/*
vi docker-compose.yml
# remember to change MON_IP
version: '3'
services:
  # deploy the Monitor daemon
  ceph-mon:
    # the image is already available in the private Harbor registry
    image: <private repository>/ceph/daemon:latest-luminous
    network_mode: host
    container_name: ceph-mon
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /etc/ceph:/etc/ceph
      - /var/lib/ceph/:/var/lib/ceph
    environment:
      # IP of this server; don't forget to change it when deploying on a different server
      MON_IP: 192.168.116.136
      CEPH_PUBLIC_NETWORK: 192.168.116.0/24
    command: mon
  # deploy the Manager daemon
  ceph-mgr:
    image: <private repository>/ceph/daemon:latest-luminous
    network_mode: host
    container_name: ceph-mgr
    restart: always
    depends_on:
      - ceph-mon
    volumes:
      - /etc/localtime:/etc/localtime
      - /etc/ceph:/etc/ceph
      - /var/lib/ceph/:/var/lib/ceph
    command: mgr
  # deploy the OSD daemon
  ceph-osd:
    image: <private repository>/ceph/daemon:latest-luminous
    network_mode: host
    container_name: ceph-osd
    restart: always
    privileged: true
    # share the host's PID namespace; containers with this option can see and operate on each other's processes by PID
    pid: host
    depends_on:
      - ceph-mgr
    volumes:
      - /etc/localtime:/etc/localtime
      - /etc/ceph:/etc/ceph
      - /var/lib/ceph/:/var/lib/ceph
      - /dev/:/dev/
      # specify a dedicated OSD data path; if not set, data is stored under /var/lib/ceph by default
      #- /mnt/cephfs:/var/lib/ceph/osd
    command: osd_directory
  # deploy the MDS daemon (for CephFS file storage; optional, use as needed)
  ceph-mds:
    image: <private repository>/ceph/daemon:latest-luminous
    network_mode: host
    container_name: ceph-mds
    restart: always
    depends_on:
      - ceph-osd
    volumes:
      - /etc/localtime:/etc/localtime
      - /etc/ceph:/etc/ceph
      - /var/lib/ceph/:/var/lib/ceph
    environment:
      # 0 = do not create the CephFS filesystem automatically (recommended), 1 = create it automatically
      CEPHFS_CREATE: 1
    command: mds
  # deploy the RADOS Gateway daemon (for object storage; optional, use as needed)
  ceph-rgw:
    image: <private repository>/ceph/daemon:latest-luminous
    network_mode: host
    container_name: ceph-rgw
    restart: always
    depends_on:
      - ceph-osd
    volumes:
      - /etc/localtime:/etc/localtime
      - /etc/ceph:/etc/ceph
      - /var/lib/ceph/:/var/lib/ceph
    command: rgw
  # deploy the RBD mirror daemon (for block storage mirroring; optional, use as needed)
  # ceph-rbd:
  #   image: <private repository>/ceph/daemon:latest-luminous
  #   network_mode: host
  #   container_name: ceph-rbd
  #   restart: always
  #   volumes:
  #     - /etc/localtime:/etc/localtime
  #     - /etc/ceph:/etc/ceph
  #     - /var/lib/ceph/:/var/lib/ceph
  #   depends_on:
  #     - ceph-osd
  #   command: rbd_mirror
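Before bringing the stack up, the compose file can be checked (optional): docker-compose config parses the file and prints the resolved configuration, or reports any syntax error.
docker-compose config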
docker-compose up -d
#on node2 and node3: copy the config from node1 and add them to the cluster
scp -r <node1>:/etc/ceph/* /etc/ceph/ && scp -r <node1>:/var/lib/ceph/bootstrap-* /var/lib/ceph/
docker-compose up -d
Check that everything started:
docker ps -a
docker-compose exec ceph-osd ceph osd tree
docker-compose exec ceph-mon ceph -s
docker-compose exec ceph-mgr ceph -s
docker-compose exec ceph-mds ceph -s
docker-compose exec ceph-rgw ceph -s
docker-compose exec ceph-rbd ceph -s # only if the ceph-rbd service has been uncommented in docker-compose.yml
Enable the dashboard on node1:
docker-compose exec ceph-mon ceph mgr dump
docker-compose exec ceph-mgr ceph mgr module enable dashboard
http://<node1>:7000
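To confirm which URL the mgr is actually serving the dashboard on (optional):
docker-compose exec ceph-mgr ceph mgr services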
#attach an additional disk as an OSD
cd /root/ceph
mkdir /ceph
mkdir /ceph/sdb
fdisk /dev/sdb # n (new partition, accept the defaults for the other prompts) / w (write)
mkfs.xfs -f /dev/sdb
mount /dev/sdb /ceph/sdb
vi /etc/fstab
/dev/sdb /ceph/sdb xfs defaults 0 0
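To make sure the fstab entry is valid before the next reboot (optional), remount everything and check the mount point:
mount -a
df -h /ceph/sdb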
vi /root/ceph/docker-compose.yml
  # deploy an additional OSD daemon for the new disk
  ceph-osd-b:
    image: <private repository>/ceph/daemon:latest-luminous
    network_mode: host
    container_name: ceph-osd-b
    restart: always
    privileged: true
    # share the host's PID namespace; containers with this option can see and operate on each other's processes by PID
    pid: host
    depends_on:
      - ceph-mgr
    volumes:
      - /etc/localtime:/etc/localtime
      - /etc/ceph:/etc/ceph
      - /var/lib/ceph/:/var/lib/ceph
      - /ceph/sdb:/var/lib/ceph/osd
      - /dev/:/dev/
      # specify a dedicated OSD data path; if not set, data is stored under /var/lib/ceph by default
      #- /mnt/cephfs:/var/lib/ceph/osd
    command: osd_directory
docker-compose up -d ceph-osd-b
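Once the new container is running, the additional OSD should appear in the CRUSH tree:
docker-compose exec ceph-mon ceph osd tree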
#commands to remove an OSD, if needed
docker-compose exec ceph-osd ceph osd crush remove {name}
docker-compose exec ceph-osd ceph osd rm {osd-num}
docker-compose exec ceph-osd ceph auth del osd.{osd-num}
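Note: before removing an OSD, it is normally marked out first so the cluster can rebalance its data, e.g.:
docker-compose exec ceph-osd ceph osd out {osd-num}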
#check PG status
docker-compose exec ceph-mon ceph pg dump
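For a one-line summary instead of the full dump (optional):
docker-compose exec ceph-mon ceph pg stat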
#create an account
cd /root/ceph/
docker-compose exec ceph-osd bash
radosgw-admin user create --uid={username} --display-name="{display-name}" [--email={email}]
radosgw-admin user info --uid={username}
radosgw-admin key create --uid={username} --key-type=s3 --access-key myAccessKey --secret-key mySecretKey
radosgw-admin user list
#set a quota
radosgw-admin quota set --quota-scope=user --uid={username} [--max-objects=1024] [--max-size=1024B]
radosgw-admin quota enable --quota-scope=user --uid={username}
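To check a user's current usage against the quota (optional, still inside the ceph-osd container):
radosgw-admin user stats --uid={username}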
#connect with S3 Browser using the following settings:
REST endpoint: <node1>:7480
Access key ID: xxxxxxxxxxxxxxxxxxxx
Secret access key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Do not use SSL/TLS
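The same credentials can also be tested from the command line; a minimal sketch, assuming the AWS CLI is installed and using the example keys created above:
export AWS_ACCESS_KEY_ID=myAccessKey
export AWS_SECRET_ACCESS_KEY=mySecretKey
aws --endpoint-url http://<node1>:7480 s3 mb s3://test-bucket
aws --endpoint-url http://<node1>:7480 s3 ls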
Note:
Ceph Luminous is version 12; how to upgrade to a newer release is still being investigated.