ceph容器化-ceph daemon

# Fetch the ceph/daemon image (jewel, CentOS 7 build) and pre-create the
# host-side directories Ceph conventionally uses for config and cluster state.
# NOTE(review): the rest of this walkthrough actually bind-mounts
# ${HOME}/ceph/etc/ceph and ${HOME}/ceph/var/lib/ceph, not these paths.
docker pull ceph/daemon:tag-build-master-jewel-centos-7
mkdir -p /etc/ceph /var/lib/ceph/



# Image coordinates used by every `docker run` below.
REPO=ceph/daemon
TAG=tag-build-master-jewel-centos-7
REPO_TAG="${REPO}:${TAG}"

##### Network parameters: must match the subnet the cluster hosts live on.
CEPH_PUBLIC_NETWORK="192.168.214.0/24"

# First IPv4 address of the first eth*/en* interface, used as the monitor IP.
# $(...) instead of legacy backticks; the sed keeps only the dotted-quad
# address and strips leading blanks (output format differs across distros).
HOST_IP=$(ifconfig -s | grep -E '^eth0|^en' | cut -d ' ' -f 1 \
    | xargs ifconfig | grep inet | grep -v inet6 \
    | sed -E 's/inet[^0-9]*([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}).*/\1/;s/^[[:blank:]]*//')

##### Host directories bind-mounted into every ceph/daemon container so that
##### configuration and cluster state survive container restarts.
VOL_ETC_CEPH="${HOME}/ceph/etc/ceph"
VOL_VAR_LIB_CEPH="${HOME}/ceph/var/lib/ceph"
  • mon
# Start the first monitor. --net=host is required so the mon binds the real
# host IP; the two -v mounts persist /etc/ceph and /var/lib/ceph on the host.
docker run -d --net=host --name=mon \
    -v "${VOL_ETC_CEPH}":/etc/ceph \
    -v "${VOL_VAR_LIB_CEPH}":/var/lib/ceph \
    -e MON_IP="${HOST_IP}" \
    -e CEPH_PUBLIC_NETWORK="${CEPH_PUBLIC_NETWORK}" \
    "${REPO_TAG}" mon
# Copy the config and keyrings generated by this first monitor to the other
# monitor hosts BEFORE starting their containers, so they join the same cluster.
scp -r ~/ceph/ root@mon-2:~
scp -r ~/ceph/ root@mon-3:~
  • osd
 # WARNING: OSD_FORCE_ZAP=1 destroys any existing data and partition table on
 # OSD_DEVICE (/dev/sdc here) -- double-check the device name on each host.
 # --privileged and the /dev mount are needed so the container can prepare the
 # raw disk; --pid=host lets it see host processes (e.g. for udev/ceph-disk).
 docker run -d --net=host --name=osd \
     --privileged=true \
     --pid=host \
     -v "${VOL_ETC_CEPH}":/etc/ceph \
     -v "${VOL_VAR_LIB_CEPH}":/var/lib/ceph/ \
     -v /dev/:/dev/ \
     -e OSD_TYPE=disk \
     -e OSD_FORCE_ZAP=1 \
     -e OSD_DEVICE=/dev/sdc \
     "${REPO_TAG}" osd
  • pool
# Create a pool named "test" with 64 placement groups and cap it at 10000 objects.
docker exec mon ceph osd pool create test 64
docker exec mon ceph osd pool set-quota test max_objects 10000
# NOTE(review): this creates an RBD image literally named "test" in the
# *default* pool, not inside the "test" pool created above -- use
# "rbd create test/<image> --size 10240" if the image should live in that pool.
docker exec mon rbd create test --size 10240
  • rbd-provisioner
# Fetch example rbd-provisioner manifests (RBAC, deployment, storageclass).
git clone https://github.com/xiaotech/ceph-pvc
[root@master ~]# ls ceph-pvc/
clusterrolebinding.yaml  deployment.yaml  README.md         role.yaml            storageclass.yaml
clusterrole.yaml                 rolebinding.yaml  serviceaccount.yaml
  • secret
[root@master ~]# cat ceph/etc/ceph/ceph.client.admin.keyring 
[client.admin]
	key = AQBncH9cCU8uLhAA83XdV9+CVuELSvJDuPAlOQ==
	auid = 0
	caps mds = "allow"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"

[root@master ~]# echo -n AQBncH9cCU8uLhAA83XdV9+CVuELSvJDuPAlOQ== | base64
QVFCbmNIOWNDVTh1TGhBQTgzWGRWOStDVnVFTFN2SkR1UEFsT1E9PQ==

(Note: `echo -n` is required here — a plain `echo` appends a trailing newline, which gets base64-encoded into the Secret's key and makes Ceph authentication fail.)

[root@master ~]# cat test/secret.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
#  namespace: ceph
type: "kubernetes.io/rbd"
data:
  key: QVFCbmNIOWNDVTh1TGhBQTgzWGRWOStDVnVFTFN2SkR1UEFsT1E9PQ==
  • storageclass
[root@master ~]# cat ceph/etc/ceph/ceph.client.admin.keyring 
[client.admin]
	key = AQBncH9cCU8uLhAA83XdV9+CVuELSvJDuPAlOQ==
	auid = 0
	caps mds = "allow"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"
[root@master ~]# echo -n AQBncH9cCU8uLhAA83XdV9+CVuELSvJDuPAlOQ== | base64
QVFCbmNIOWNDVTh1TGhBQTgzWGRWOStDVnVFTFN2SkR1UEFsT1E9PQ==
[root@master ~]# cat test/storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
   name: ceph-class
provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.214.101:6789,192.168.214.102:6789,192.168.214.103:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: default   # secret.yaml above creates ceph-secret in the default namespace (its "namespace: ceph" line is commented out)
  pool: test
  userId: admin
  userSecretName: ceph-secret
  imageFeatures: layering
  imageFormat: "2"

猜你喜欢

转载自blog.csdn.net/qq_42747099/article/details/88912290