glusterfs

/etc/hosts example:

127.0.0.1 localhost localhost.localdomain www3
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.xxx.xxx.xxx www1
10.xxx.xxx.xxx www2
10.xxx.xxx.xxx wplb cluster
10.xxx.xxx.xxx wpdb
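Quick sanity check that the names resolve on every node (hostnames taken from the example above):
getent hosts www1 www2 wplb wpdb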

mkdir -p /data/brick1
yum install xfsprogs xfsdump
mkfs.xfs -i size=512 /dev/sdb2
echo '/dev/sdb2 /data/brick1 xfs defaults,noatime 1 2' >> /etc/fstab
mount -a
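Sanity check that the brick filesystem is mounted as XFS before continuing (device and mount point as above):
df -hT /data/brick1
mount | grep /data/brick1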

# CENTOS
yum install epel-release
wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo
yum install glusterfs-server
service glusterd start
service glusterd status
chkconfig glusterd on
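Optional version check to confirm the packages installed cleanly (output varies by GlusterFS release):
glusterfs --version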

#Ubuntu
apt-get install glusterfs-server
apt-get install nfs-kernel-server
apt-get install nfs-common
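On Ubuntu the init script is typically called glusterfs-server rather than glusterd; a sketch of starting and checking it (name may differ by release):
service glusterfs-server start
service glusterfs-server status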

#memcacheD
apt-get install php-memcached
apt-get install php-memcache
-> do this on all machines:
nano /etc/memcached.conf
change -l 127.0.0.1 to -l [serverip]
service memcached restart
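To verify memcached now listens on the server IP instead of only on localhost (uses netstat from net-tools; [serverip] as configured above):
netstat -tlnp | grep 11211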
nano /etc/php5/apache2/php.ini
session.save_handler = memcache
session.save_path = 'tcp://10.1.1.1:11211,tcp://10.2.2.2:11211,tcp://10.3.3.3:11211'
nano /etc/php5/mods-available/memcache.ini
memcache.allow_failover=1
memcache.session_redundancy=4 (number of memcached servers plus 1)
service apache2 reload
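Minimal check that the memcache extensions are actually loaded (assumes the CLI shares the mods-available config; otherwise check a phpinfo() page served through Apache):
php5 -m | grep -i memcache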

***** Open the GlusterFS ports in iptables
iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT
iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49154 -j ACCEPT
iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport 111 -j ACCEPT
iptables -I INPUT -m state --state NEW -m udp -p udp --dport 111 -j ACCEPT
iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport 38465:38467 -j ACCEPT
service iptables save
service iptables restart
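Quick check that the new rules are active:
iptables -nL INPUT | grep -E '24007|49152|111|38465'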

gluster peer probe 10.xxx.xxx.xxx

# repeat the peer probe on the other server(s)
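Both nodes should now list each other as connected:
gluster peer status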
mkdir /data/brick1/gv0
gluster volume create gv0 replica 2 server1:/data/brick1/gv0 server2:/data/brick1/gv0
gluster volume start gv0
gluster volume info
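The brick processes and self-heal daemon can be checked per volume:
gluster volume status gv0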

# add to /etc/sysconfig/network-scripts/ifcfg-eth0 for automount
LINKDELAY=30

echo 'www1:/gv0 /home/wpworker/www glusterfs defaults,_netdev 0 0' >> /etc/fstab
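Mount the volume and run a small replication test (mount point from the fstab line above; the test file name is just an example, create it on one web server and look for it on another):
mkdir -p /home/wpworker/www
mount -a
df -hT /home/wpworker/www
touch /home/wpworker/www/replication-test
ls -l /home/wpworker/www/replication-test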

# heal the cluster and restart it
gluster volume heal gv0 info
gluster volume heal gv0
gluster volume start gv0 force
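If healing does not converge, it is worth checking for split-brain entries as well (subcommand available on newer GlusterFS releases):
gluster volume heal gv0 info split-brain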

# if the cluster connection already exists (brick was already part of a volume), reset the brick:
setfattr -x trusted.glusterfs.volume-id $brick_path
setfattr -x trusted.gfid $brick_path
rm -rf $brick_path/.glusterfs
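$brick_path stands for the brick directory, e.g. /data/brick1/gv0. After clearing the attributes, restart the daemon on that node and retry the volume create:
service glusterd restart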
