参考http://hj192837.blog.51cto.com/655995/1539329,升级kernel至3.10
disable selinux and iptables
service iptables stop
chkconfig iptables off; chkconfig ip6tables off
setenforce 0
vi /etc/selinux/config
SELINUX=disabled
1. Create ceph user on each Ceph Node
useradd ceph
passwd ceph
Add sudo privileges for the user on each Ceph Node
echo "ceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/ceph
chmod 0440 /etc/sudoers.d/ceph
2. Disable requiretty on each Ceph Node
visudo
# change Defaults requiretty line
Defaults:ceph !requiretty
3. yum install yum-plugin-priorities
vi /etc/yum/pluginconf.d/priorities.conf
[main]
enabled = 1
add ceph yum repository on each ceph Node
vi /etc/yum.repos.d/ceph-extras.repo
[ceph-extras]
name=Ceph Extras Packages
baseurl=file:///ceph/ceph-extras
enabled=1
priority=2
gpgcheck=0
type=rpm-md
[ceph-extras-noarch]
name=Ceph Extras noarch
baseurl=file:///ceph/ceph-extras-noarch
enabled=1
priority=2
gpgcheck=0
type=rpm-md
vi /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages
baseurl=file:///ceph/ceph
enabled=1
priority=2
gpgcheck=0
type=rpm-md
[ceph-noarch]
name=Ceph noarch packages
baseurl=file:///ceph/ceph-noarch
enabled=1
priority=2
gpgcheck=0
type=rpm-md
vi /etc/yum.repos.d/ceph-apache.repo
[apache2-ceph-noarch]
name=Apache noarch packages for Ceph
baseurl=file:///ceph/apache2-ceph-noarch
enabled=1
priority=2
gpgcheck=0
type=rpm-md
vi /etc/yum.repos.d/ceph-fastcgi.repo
[fastcgi-ceph-basearch]
name=FastCGI basearch packages for Ceph
baseurl=file:///ceph/fastcgi-ceph-basearch
enabled=1
priority=2
gpgcheck=0
type=rpm-md
4. add epel yum repository on each ceph Node
vi /etc/yum.repos.d/epel.repo
[epel]
name=epel
baseurl=http://mirrors.sohu.com/fedora-epel/6Server/x86_64
enabled=1
gpgcheck=0
vi /etc/hosts (on all nodes)
127.0.0.1 localhost
192.168.1.15 ceph1
192.168.1.16 ceph2
192.168.1.17 ceph3
192.168.1.18 client1
5. on Ceph1 run as ceph user:
ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -C '' -N ''
vi ~/.ssh/config
Host ceph2
Hostname ceph2
User ceph
StrictHostKeyChecking no
Host ceph3
Hostname ceph3
User ceph
StrictHostKeyChecking no
ssh-copy-id ceph2
ssh-copy-id ceph3
yum -y install ceph-deploy
mkdir ceph-cluster
cd ceph-cluster
# Create the Cluster
ceph-deploy new ceph1 ceph2 ceph3
# Install Ceph
ceph-deploy install ceph1 ceph2 ceph3
# Add the initial monitor(s) and gather the keys
ceph-deploy mon create-initial
# Add OSDs
ceph-deploy osd create --zap-disk ceph1:sdb
ceph-deploy osd create --zap-disk ceph2:sdb
ceph-deploy osd create --zap-disk ceph3:sdb
# copy the configuration file and admin key to nodes
ceph-deploy admin ceph1 ceph2 ceph3
chmod +r /etc/ceph/ceph.client.admin.keyring
ssh ceph2 chmod +r /etc/ceph/ceph.client.admin.keyring
ssh ceph3 chmod +r /etc/ceph/ceph.client.admin.keyring
Notes: 如果前面只做了:ceph-deploy new ceph1,那么运行下面的命令:
ceph-deploy mon add ceph2 ceph3
check quorum status for monitors:
ceph quorum_status --format json-pretty
# check health
ceph -w
Add Metadata Server:
ceph-deploy mds create ceph1 (Currently Ceph runs in production with one metadata server only)
ceph mds stat
# List cluster's pool
ceph osd lspools
The default pools include:
data
metadata
rbd
# Create pool
ceph osd pool create {pool-name} {pg-num} [{pgp-num}] [replicated]
example: ceph osd pool create mydatapool {pg-num}
Total PGs = (OSDs * 100) / pool size (i.e. number of replicas per object); use this value for both pg-num and pgp-num
The result should be rounded up to the nearest power of two
Install ceph client on client1:
on ceph-deploy node:
ceph-deploy install client1
ceph-deploy admin client1
chmod +r /etc/ceph/ceph.client.admin.keyring
Create Ceph admin host:
on ceph-deploy node:
ceph-deploy admin admin-host
ceph-deploy config push admin-host