
      Setting Up Ceph Distributed Storage

      環(huán)境:

      系統(tǒng)

      IP地址

      主機名(登錄用戶)

      承載角色

      CentOS 7.4 64Bit 1611

      10.199.100.170

      dlp(yzyu)

      ceph-client(root)

      admin-node

      ceph-client

      Centos 7.4 64Bit 1611

      10.199.100.171

      node1(yzyu)

      添加一塊硬盤

      mon-node

      osd0-node

      mds-node

      Centos 7.4 64Bit 1611

      10.199.100.172

      node2(yzyu)

      添加一塊硬盤

      mon-node

      osd1-node

       

      • Configure the base environment

      [root@dlp ~]# useradd yzyu
      [root@dlp ~]# echo "yzyu" | passwd --stdin yzyu
      [root@dlp ~]# cat <<END >>/etc/hosts
      10.199.100.170 dlp
      10.199.100.171 node1
      10.199.100.172 node2
      END
      [root@dlp ~]# echo "yzyu ALL = (root) NOPASSWD:ALL" >> /etc/sudoers.d/yzyu
      [root@dlp ~]# chmod 0440 /etc/sudoers.d/yzyu

      [root@node1 ~]# useradd yzyu
      [root@node1 ~]# echo "yzyu" | passwd --stdin yzyu
      [root@node1 ~]# cat <<END >>/etc/hosts
      10.199.100.170 dlp
      10.199.100.171 node1
      10.199.100.172 node2
      END
      [root@node1 ~]# echo "yzyu ALL = (root) NOPASSWD:ALL" >> /etc/sudoers.d/yzyu
      [root@node1 ~]# chmod 0440 /etc/sudoers.d/yzyu

      [root@node2 ~]# useradd yzyu
      [root@node2 ~]# echo "yzyu" | passwd --stdin yzyu
      [root@node2 ~]# cat <<END >>/etc/hosts
      10.199.100.170 dlp
      10.199.100.171 node1
      10.199.100.172 node2
      END
      [root@node2 ~]# echo "yzyu ALL = (root) NOPASSWD:ALL" >> /etc/sudoers.d/yzyu
      [root@node2 ~]# chmod 0440 /etc/sudoers.d/yzyu
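      A quick sanity check (an addition, not in the original post) confirms passwordless sudo and hostname resolution on each node before going further:

      [yzyu@node1 ~]$ sudo whoami    ## should print "root" without asking for a password
      root
      [yzyu@node1 ~]$ ping -c 1 dlp    ## confirms the /etc/hosts entries resolve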

      • Configure the NTP time service

      [root@dlp ~]# yum -y install ntp ntpdate
      [root@dlp ~]# sed -i '/^server/s/^/#/g' /etc/ntp.conf
      [root@dlp ~]# sed -i '25aserver 127.127.1.0\nfudge 127.127.1.0 stratum 8' /etc/ntp.conf
      [root@dlp ~]# systemctl start ntpd
      [root@dlp ~]# systemctl enable ntpd
      [root@dlp ~]# netstat -lntup

      [root@node1 ~]# yum -y install ntpdate
      [root@node1 ~]# /usr/sbin/ntpdate 10.199.100.170
      [root@node1 ~]# echo "/usr/sbin/ntpdate 10.199.100.170" >>/etc/rc.local
      [root@node1 ~]# chmod +x /etc/rc.local

      [root@node2 ~]# yum -y install ntpdate
      [root@node2 ~]# /usr/sbin/ntpdate 10.199.100.170
      [root@node2 ~]# echo "/usr/sbin/ntpdate 10.199.100.170" >>/etc/rc.local
      [root@node2 ~]# chmod +x /etc/rc.local
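      To verify that the nodes can actually reach the NTP server on dlp (a check added here, not in the original post), query it without touching the clock:

      [root@node1 ~]# /usr/sbin/ntpdate -q 10.199.100.170    ## query only; reports the offset against dlp without setting the time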

      • Install Ceph on the dlp, node1, and node2 nodes

      [root@dlp ~]# yum -y install yum-utils
      [root@dlp ~]# yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/
      [root@dlp ~]# yum -y install epel-release --nogpgcheck
      [root@dlp ~]# cat <<'END' >>/etc/yum.repos.d/ceph.repo
      [Ceph]
      name=Ceph packages for $basearch
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/$basearch
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1

      [Ceph-noarch]
      name=Ceph noarch packages
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/noarch
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1

      [ceph-source]
      name=Ceph source packages
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/SRPMS
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1
      END

      [root@dlp ~]# ls /etc/yum.repos.d/    ## the default CentOS repos must remain in place; installation needs them together with the EPEL repo and the NetEase (mirrors.163.com) Ceph repo

      bak                    CentOS-fasttrack.repo  ceph.repo

      CentOS-Base.repo       CentOS-Media.repo      dl.fedoraproject.org_pub_epel_7_x86_64_.repo

      CentOS-CR.repo         CentOS-Sources.repo    epel.repo

      CentOS-Debuginfo.repo  CentOS-Vault.repo      epel-testing.repo

      [root@dlp ~]# su - yzyu
      [yzyu@dlp ~]$ mkdir ceph-cluster    ## create the ceph-deploy working directory
      [yzyu@dlp ~]$ cd ceph-cluster
      [yzyu@dlp ceph-cluster]$ sudo yum -y install ceph-deploy    ## install the Ceph deployment tool
      [yzyu@dlp ceph-cluster]$ sudo yum -y install ceph --nogpgcheck    ## install the Ceph packages

      [root@node1 ~]# yum -y install yum-utils
      [root@node1 ~]# yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/
      [root@node1 ~]# yum -y install epel-release --nogpgcheck
      [root@node1 ~]# cat <<'END' >>/etc/yum.repos.d/ceph.repo
      [Ceph]
      name=Ceph packages for $basearch
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/$basearch
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1

      [Ceph-noarch]
      name=Ceph noarch packages
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/noarch
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1

      [ceph-source]
      name=Ceph source packages
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/SRPMS
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1
      END

      [root@node1 ~]# su - yzyu
      [yzyu@node1 ~]$ mkdir ceph-cluster
      [yzyu@node1 ~]$ cd ceph-cluster
      [yzyu@node1 ceph-cluster]$ sudo yum -y install ceph-deploy
      [yzyu@node1 ceph-cluster]$ sudo yum -y install ceph --nogpgcheck
      [yzyu@node1 ceph-cluster]$ sudo yum -y install deltarpm

      [root@node2 ~]# yum -y install yum-utils
      [root@node2 ~]# yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/
      [root@node2 ~]# yum -y install epel-release --nogpgcheck
      [root@node2 ~]# cat <<'END' >>/etc/yum.repos.d/ceph.repo
      [Ceph]
      name=Ceph packages for $basearch
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/$basearch
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1

      [Ceph-noarch]
      name=Ceph noarch packages
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/noarch
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1

      [ceph-source]
      name=Ceph source packages
      baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/SRPMS
      enabled=1
      gpgcheck=0
      type=rpm-md
      gpgkey=https://mirrors.163.com/ceph/keys/release.asc
      priority=1
      END

      [root@node2 ~]# su - yzyu
      [yzyu@node2 ~]$ mkdir ceph-cluster
      [yzyu@node2 ~]$ cd ceph-cluster
      [yzyu@node2 ceph-cluster]$ sudo yum -y install ceph-deploy
      [yzyu@node2 ceph-cluster]$ sudo yum -y install ceph --nogpgcheck
      [yzyu@node2 ceph-cluster]$ sudo yum -y install deltarpm

      • From the dlp admin node, set up management of the storage nodes and initialize the cluster

      [yzyu@dlp ceph-cluster]$ pwd    ## must be run from the ceph-cluster working directory
      /home/yzyu/ceph-cluster
      [yzyu@dlp ceph-cluster]$ ssh-keygen -t rsa    ## the admin node manages the mon nodes over SSH; create a key pair and copy the public key to every node
      [yzyu@dlp ceph-cluster]$ ssh-copy-id yzyu@dlp
      [yzyu@dlp ceph-cluster]$ ssh-copy-id yzyu@node1
      [yzyu@dlp ceph-cluster]$ ssh-copy-id yzyu@node2
      [yzyu@dlp ceph-cluster]$ ssh-copy-id root@ceph-client
      [yzyu@dlp ceph-cluster]$ cat <<END >>/home/yzyu/.ssh/config
      Host dlp
         Hostname dlp
         User yzyu
      Host node1
         Hostname node1
         User yzyu
      Host node2
         Hostname node2
         User yzyu
      END
      [yzyu@dlp ceph-cluster]$ chmod 644 /home/yzyu/.ssh/config
      [yzyu@dlp ceph-cluster]$ ceph-deploy new node1 node2    ## initialize the cluster with node1 and node2 as initial monitors
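      At this point the working directory holds the files ceph-deploy just generated; listing it (a check added here, not in the original post) should show something like:

      [yzyu@dlp ceph-cluster]$ ls
      ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring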


      Since there are only two OSDs, lower the default pool replica count to 2 so that pools can reach active+clean:

      [yzyu@dlp ceph-cluster]$ cat <<END >>/home/yzyu/ceph-cluster/ceph.conf
      osd pool default size = 2
      END
      [yzyu@dlp ceph-cluster]$ ceph-deploy install node1 node2    ## install Ceph on the nodes


      • Configure the Ceph mon (monitor) daemons

      [yzyu@dlp ceph-cluster]$ ceph-deploy mon create-initial    ## deploy the initial monitors and gather their keys


      Note: each node keeps its configuration under /etc/ceph/; ceph-deploy automatically syncs it with the configuration file on the dlp admin node.

       

      • Configure the Ceph OSD storage

      Configure the osd1 storage device on node1:

      [yzyu@node1 ~]$ sudo fdisk /dev/sdc...sdc    ## partition each disk, converting it to a GPT partition table
      [yzyu@node1 ~]$ sudo pvcreate /dev/sdc1 /dev/sdd1 /dev/sde1 /dev/sdf1 /dev/sdg1 /dev/sdh1 /dev/sdi1 /dev/sdj1 /dev/sdk1 /dev/sdl1 /dev/sdm1 /dev/sdn1 /dev/sdo1 /dev/sdp1 /dev/sdq1 /dev/sdr1 /dev/sds1 /dev/sdt1 /dev/sdu1 /dev/sdv1 /dev/sdw1 /dev/sdx1 /dev/sdy1 /dev/sdz1    ## create the physical volumes
      [yzyu@node1 ~]$ sudo vgcreate vg1 /dev/sdc1 /dev/sdd1 /dev/sde1 /dev/sdf1 /dev/sdg1 /dev/sdh1 /dev/sdi1 /dev/sdj1 /dev/sdk1 /dev/sdl1 /dev/sdm1 /dev/sdn1 /dev/sdo1 /dev/sdp1 /dev/sdq1 /dev/sdr1 /dev/sds1 /dev/sdt1 /dev/sdu1 /dev/sdv1 /dev/sdw1 /dev/sdx1 /dev/sdy1 /dev/sdz1    ## create the volume group
      [yzyu@node1 ~]$ sudo lvcreate -L 130T -n lv1 vg1    ## carve out the logical volume
      [yzyu@node1 ~]$ sudo mkfs.xfs /dev/vg1/lv1    ## format it with XFS

      [yzyu@node1 ~]$ sudo partx -a /dev/vg1/lv1
      [yzyu@node1 ~]$ sudo mkdir /var/local/osd1
      [yzyu@node1 ~]$ sudo vi /etc/fstab
      /dev/vg1/lv1 /var/local/osd1 xfs defaults 0 0
      :wq
      [yzyu@node1 ~]$ sudo mount -a
      [yzyu@node1 ~]$ sudo chmod 777 /var/local/osd1
      [yzyu@node1 ~]$ sudo chown ceph:ceph /var/local/osd1/
      [yzyu@node1 ~]$ ls -ld /var/local/osd1/
      [yzyu@node1 ~]$ df -hT
      [yzyu@node1 ~]$ exit

      Configure the osd2 storage device on node2:

      [yzyu@node2 ~]$ sudo fdisk /dev/sdc...sdc
      [yzyu@node2 ~]$ sudo pvcreate /dev/sdc1 /dev/sdd1 /dev/sde1 /dev/sdf1 /dev/sdg1 /dev/sdh1 /dev/sdi1 /dev/sdj1 /dev/sdk1 /dev/sdl1 /dev/sdm1 /dev/sdn1 /dev/sdo1 /dev/sdp1 /dev/sdq1 /dev/sdr1 /dev/sds1 /dev/sdt1 /dev/sdu1 /dev/sdv1 /dev/sdw1 /dev/sdx1 /dev/sdy1 /dev/sdz1
      [yzyu@node2 ~]$ sudo vgcreate vg2 /dev/sdc1 /dev/sdd1 /dev/sde1 /dev/sdf1 /dev/sdg1 /dev/sdh1 /dev/sdi1 /dev/sdj1 /dev/sdk1 /dev/sdl1 /dev/sdm1 /dev/sdn1 /dev/sdo1 /dev/sdp1 /dev/sdq1 /dev/sdr1 /dev/sds1 /dev/sdt1 /dev/sdu1 /dev/sdv1 /dev/sdw1 /dev/sdx1 /dev/sdy1 /dev/sdz1
      [yzyu@node2 ~]$ sudo lvcreate -L 130T -n lv2 vg2
      [yzyu@node2 ~]$ sudo mkfs.xfs /dev/vg2/lv2

      [yzyu@node2 ~]$ sudo partx -a /dev/vg2/lv2
      [yzyu@node2 ~]$ sudo mkdir /var/local/osd2
      [yzyu@node2 ~]$ sudo vi /etc/fstab
      /dev/vg2/lv2 /var/local/osd2 xfs defaults 0 0
      :wq
      [yzyu@node2 ~]$ sudo mount -a
      [yzyu@node2 ~]$ sudo chmod 777 /var/local/osd2
      [yzyu@node2 ~]$ sudo chown ceph:ceph /var/local/osd2/
      [yzyu@node2 ~]$ ls -ld /var/local/osd2/
      [yzyu@node2 ~]$ df -hT
      [yzyu@node2 ~]$ exit

      Register the OSDs from the dlp admin node:

      [yzyu@dlp ceph-cluster]$ ceph-deploy osd prepare node1:/var/local/osd1 node2:/var/local/osd2    ## create the OSDs, pointing each at its node's storage directory


      [yzyu@dlp ceph-cluster]$ chmod +r /home/yzyu/ceph-cluster/ceph.client.admin.keyring
      [yzyu@dlp ceph-cluster]$ ceph-deploy osd activate node1:/var/local/osd1 node2:/var/local/osd2    ## activate the OSDs


      [yzyu@dlp ceph-cluster]$ ceph-deploy admin node1 node2    ## push the admin keyring and configuration to the nodes


      [yzyu@dlp ceph-cluster]$ sudo cp /home/yzyu/ceph-cluster/ceph.client.admin.keyring /etc/ceph/
      [yzyu@dlp ceph-cluster]$ sudo cp /home/yzyu/ceph-cluster/ceph.conf /etc/ceph/
      [yzyu@dlp ceph-cluster]$ ls /etc/ceph/
      ceph.client.admin.keyring  ceph.conf  rbdmap
      [yzyu@dlp ceph-cluster]$ ceph quorum_status --format json-pretty    ## show detailed quorum information for the cluster

      • Verify the Ceph cluster status

      [yzyu@dlp ceph-cluster]$ ceph health

      HEALTH_OK

      [yzyu@dlp ceph-cluster]$ ceph -s    ## overall cluster status

          cluster 24fb6518-8539-4058-9c8e-d64e43b8f2e2

           health HEALTH_OK

           monmap e1: 2 mons at {node1=10.199.100.171:6789/0,node2=10.199.100.172:6789/0}

                  election epoch 6, quorum 0,1 node1,node2

           osdmap e10: 2 osds: 2 up, 2 in

                  flags sortbitwise,require_jewel_osds

            pgmap v20: 64 pgs, 1 pools, 0 bytes data, 0 objects

                  10305 MB used, 30632 MB / 40938 MB avail    ## used, available, and total capacity

                        64 active+clean

      [yzyu@dlp ceph-cluster]$ ceph osd tree

      ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY

      -1 0.03897 root default                                    

      -2 0.01949     host node1                                  

       0 0.01949         osd.0       up  1.00000          1.00000

      -3 0.01949     host node2                                  

       1 0.01949         osd.1       up  1.00000          1.00000

       

      [yzyu@dlp ceph-cluster]$ ssh yzyu@node1    ## check node1's listening ports, configuration file, and disk usage

      [yzyu@node1 ~]$ df -hT |grep lv1

      /dev/vg1/lv1                   xfs        20G  5.1G   15G   26% /var/local/osd1           

      [yzyu@node1 ~]$ du -sh /var/local/osd1/

      5.1G /var/local/osd1/

      [yzyu@node1 ~]$ ls /var/local/osd1/

      activate.monmap  active  ceph_fsid  current  fsid  journal  keyring  magic  ready  store_version  superblock  systemd  type  whoami

      [yzyu@node1 ~]$ ls /etc/ceph/

      ceph.client.admin.keyring  ceph.conf  rbdmap  tmppVBe_2

      [yzyu@node1 ~]$ cat /etc/ceph/ceph.conf

      [global]

      fsid = 0fcdfa46-c8b7-43fc-8105-1733bce3bfeb

      mon_initial_members = node1, node2

      mon_host = 10.199.100.171,10.199.100.172

      auth_cluster_required = cephx

      auth_service_required = cephx

      auth_client_required = cephx

      osd pool default size = 2

       

      [yzyu@dlp ceph-cluster]$ ssh yzyu@node2    ## check node2's listening ports, configuration file, and disk usage

      [yzyu@node2 ~]$ df -hT |grep lv2

      /dev/vg2/lv2                   xfs        20G  5.1G   15G   26% /var/local/osd2

      [yzyu@node2 ~]$ du -sh /var/local/osd2/

      5.1G /var/local/osd2/

      [yzyu@node2 ~]$ ls /var/local/osd2/

      activate.monmap  active  ceph_fsid  current  fsid  journal  keyring  magic  ready  store_version  superblock  systemd  type  whoami

      [yzyu@node2 ~]$ ls /etc/ceph/

      ceph.client.admin.keyring  ceph.conf  rbdmap  tmpmB_BTa

      [yzyu@node2 ~]$ cat /etc/ceph/ceph.conf

      [global]

      fsid = 0fcdfa46-c8b7-43fc-8105-1733bce3bfeb

      mon_initial_members = node1, node2

      mon_host = 10.199.100.171,10.199.100.172

      auth_cluster_required = cephx

      auth_service_required = cephx

      auth_client_required = cephx

      osd pool default size = 2

       

      • Configure the Ceph MDS (metadata) daemon

      [yzyu@dlp ceph-cluster]$ ceph-deploy mds create node1
      [yzyu@dlp ceph-cluster]$ ssh yzyu@node1
      [yzyu@node1 ~]$ netstat -utpln |grep 68
      (No info could be read for "-p": geteuid()=1000 but you should be root.)
      tcp        0      0 0.0.0.0:6800            0.0.0.0:*               LISTEN      -
      tcp        0      0 0.0.0.0:6801            0.0.0.0:*               LISTEN      -
      tcp        0      0 0.0.0.0:6802            0.0.0.0:*               LISTEN      -
      tcp        0      0 0.0.0.0:6803            0.0.0.0:*               LISTEN      -
      tcp        0      0 0.0.0.0:6804            0.0.0.0:*               LISTEN      -
      tcp        0      0 10.199.100.171:6789     0.0.0.0:*               LISTEN      -
      [yzyu@node1 ~]$ exit
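      As an added check (not in the original post), the MDS state can also be queried from the admin node; once the file system in the next section exists, it reports up:active:

      [yzyu@dlp ceph-cluster]$ ceph mds stat
      e5: 1/1/1 up {0=node1=up:active}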

      • Configure the Ceph client

      [yzyu@dlp ceph-cluster]$ ceph-deploy install ceph-client    ## prompts for the ceph-client root password


      [yzyu@dlp ceph-cluster]$ ceph-deploy admin ceph-client


      [yzyu@dlp ceph-cluster]$ su -
      [root@dlp ~]# chmod +r /etc/ceph/ceph.client.admin.keyring
      [root@dlp ~]# exit

      [yzyu@dlp ceph-cluster]$ ceph osd pool create cephfs_data 128    ## data pool
      pool 'cephfs_data' created
      [yzyu@dlp ceph-cluster]$ ceph osd pool create cephfs_metadata 128    ## metadata pool
      pool 'cephfs_metadata' created
      [yzyu@dlp ceph-cluster]$ ceph fs new cephfs cephfs_metadata cephfs_data    ## create the file system; note the metadata pool is named first
      new fs with metadata pool 2 and data pool 1
      [yzyu@dlp ceph-cluster]$ ceph fs ls    ## list file systems
      name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
      [yzyu@dlp ceph-cluster]$ ceph -s
          cluster 24fb6518-8539-4058-9c8e-d64e43b8f2e2
           health HEALTH_WARN
                  clock skew detected on mon.node2
                  too many PGs per OSD (320 > max 300)
                  Monitor clock skew detected
           monmap e1: 2 mons at {node1=10.199.100.171:6789/0,node2=10.199.100.172:6789/0}
                  election epoch 6, quorum 0,1 node1,node2
            fsmap e5: 1/1/1 up {0=node1=up:active}
           osdmap e17: 2 osds: 2 up, 2 in
                  flags sortbitwise,require_jewel_osds
            pgmap v54: 320 pgs, 3 pools, 4678 bytes data, 24 objects
                  10309 MB used, 30628 MB / 40938 MB avail
                       320 active+clean
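      The "too many PGs per OSD (320 > max 300)" warning is plain arithmetic (this explanation is an addition to the original post): the default rbd pool was created with 64 PGs, and the two CephFS pools above add 128 each, so with two-way replication spread across two OSDs:

      PGs per OSD = total PGs x replicas / OSDs
                  = (64 + 128 + 128) x 2 / 2
                  = 320, which exceeds the default warning threshold of 300

      Creating the CephFS pools with fewer PGs (for example 64 each) would keep the cluster under the limit.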

      • Test storage from the Ceph client

      [root@ceph-client ~]# mkdir /mnt/ceph

      [root@ceph-client ~]# grep key /etc/ceph/ceph.client.admin.keyring |awk '{print $3}' >>/etc/ceph/admin.secret

      [root@ceph-client ~]# cat /etc/ceph/admin.secret

      AQCd/x9bsMqKFBAAZRNXpU5QstsPlfe1/FvPtQ==

      [root@ceph-client ~]# mount -t ceph 10.199.100.171:6789:/  /mnt/ceph/ -o name=admin,secretfile=/etc/ceph/admin.secret

      [root@ceph-client ~]# df -hT |grep ceph

      10.199.100.171:6789:/      ceph       40G   11G   30G   26% /mnt/ceph

      [root@ceph-client ~]# dd if=/dev/zero of=/mnt/ceph/1.file bs=1G count=1

      1+0 records in
      1+0 records out
      1073741824 bytes (1.1 GB) copied, 14.2938 s, 75.1 MB/s

      [root@ceph-client ~]# ls /mnt/ceph/

      1.file
      [root@ceph-client ~]# df -hT |grep ceph

      10.199.100.171:6789:/      ceph       40G   13G   28G   33% /mnt/ceph
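      The mount above does not survive a reboot. One way to make it persistent (an addition, not in the original post; the _netdev option defers mounting until the network is up) is an /etc/fstab entry along these lines:

      [root@ceph-client ~]# cat <<END >>/etc/fstab
      10.199.100.171:6789:/ /mnt/ceph ceph name=admin,secretfile=/etc/ceph/admin.secret,_netdev,noatime 0 0
      END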

       

      • Troubleshooting notes

      1. If something goes wrong during setup and the cluster has to be rebuilt or Ceph reinstalled, first wipe the existing cluster data with the following commands:

      [yzyu@dlp ceph-cluster]$ ceph-deploy purge node1 node2

      [yzyu@dlp ceph-cluster]$ ceph-deploy purgedata node1 node2

      [yzyu@dlp ceph-cluster]$ ceph-deploy forgetkeys && rm ceph.*

      2. While the dlp node installs Ceph on the storage nodes and the client, yum may time out. This is usually a network problem; rerunning the install command a few times normally gets through.

      3. When running ceph-deploy from the dlp node to manage the storage nodes, the working directory must be /home/yzyu/ceph-cluster/, otherwise ceph-deploy reports that it cannot find the ceph.conf configuration file.

      4. Each OSD's data directory /var/local/osd*/ must be mode 777 and owned by user and group ceph.

      5. The Ceph install launched from the dlp admin node can fail with a yum error (the original post shows it only as a screenshot; judging by the fix below, it involved the epel-release package).

      Workaround:

      1. Reinstall the epel-release package on node1 or node2 with yum;

      2. If that does not resolve it, download the package manually and install it locally, as sketched below.
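      The original gives this command only as a screenshot; a plausible reconstruction (the package filename is hypothetical, use whichever version was actually downloaded) is:

      [root@node1 ~]# rpm -ivh epel-release-7-11.noarch.rpm    ## hypothetical filename of the locally downloaded package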

      6. If the master configuration file /home/yzyu/ceph-cluster/ceph.conf is changed on the dlp admin node, it has to be pushed out to the storage nodes, as shown below.

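      The push command survives only as a screenshot; the standard ceph-deploy invocation (a reconstruction, not verbatim from the post) is:

      [yzyu@dlp ceph-cluster]$ ceph-deploy --overwrite-conf config push node1 node2    ## overwrite /etc/ceph/ceph.conf on the nodes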

      Once a node has received the new configuration, its Ceph daemons must be restarted:

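      Here too only screenshots survive; on CentOS 7 with systemd, restarting the Jewel daemons would look like this sketch:

      [root@node1 ~]# systemctl restart ceph-mon.target    ## restart the monitor daemons
      [root@node1 ~]# systemctl restart ceph-osd.target    ## restart the OSD daemons
      [root@node2 ~]# systemctl restart ceph-mon.target
      [root@node2 ~]# systemctl restart ceph-osd.target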

      7. Checking the cluster status from the dlp admin node may show HEALTH_WARN with "clock skew detected on mon.node2"; the cause is that the nodes' clocks have drifted apart.

      Restart the ntpd service on the dlp node, then have the storage nodes resync their clocks:
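      Based on the NTP setup earlier (the original shows this step only as a screenshot), the commands would be:

      [root@dlp ~]# systemctl restart ntpd
      [root@node1 ~]# /usr/sbin/ntpdate 10.199.100.170
      [root@node2 ~]# /usr/sbin/ntpdate 10.199.100.170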

      8. As noted above, ceph-deploy commands on the dlp admin node must be run from /home/yzyu/ceph-cluster/, otherwise the main ceph.conf configuration file will not be found.
