hosts file configuration

127.0.0.1 localhost

172.16.207.123 glusterfs23

172.16.207.124 glusterfs24

Check the system version

# cat /etc/redhat-release

Download the packages (paste the command below for your system into a terminal and press Enter)

CentOS 7.2

wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-api-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-api-devel-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-cli-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-debuginfo-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-devel-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-extra-xlators-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-fuse-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-geo-replication-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-libs-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-rdma-3.7.2-3.el7.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.7/3.7.2/CentOS/epel-7.1/x86_64/glusterfs-server-3.7.2-3.el7.x86_64.rpm

CentOS 6.5

wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-api-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-api-devel-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-cli-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-debuginfo-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-devel-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-extra-xlators-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-fuse-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-geo-replication-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-libs-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-rdma-3.6.9-1.el6.x86_64.rpm && wget http://download.gluster.org/pub/gluster/glusterfs/3.6/3.6.9/CentOS/epel-6.5/x86_64/glusterfs-server-3.6.9-1.el6.x86_64.rpm

Install

yum -y install ./*.rpm

Error: Package: glusterfs-server-3.6.9-1.el6.x86_64 (/glusterfs-server-3.6.9-1.el6.x86_64)

Requires: glusterfs-libs = 3.6.9-1.el6

Installed: glusterfs-libs-3.7.5-19.el6.x86_64 (@base)

If installation fails with the error shown above:

[root@bogon local]# rpm -qa |grep glusterfs

glusterfs-api-3.7.5-19.el6.x86_64

glusterfs-libs-3.7.5-19.el6.x86_64

glusterfs-client-xlators-3.7.5-19.el6.x86_64

glusterfs-3.7.5-19.el6.x86_64

[root@bogon usr]# yum remove glusterfs*    # First remove all glusterfs-related packages; this also removes some dependent packages

yum -y install ./*.rpm    # then reinstall

yum install libvirt-0.10.2-60.el6.x86_64 qemu-img-0.12.1.2-2.491.el6_8.3.x86_64 qemu-kvm-0.12.1.2-2.491.el6_8.3.x86_64

Then reinstall the dependency packages that were removed in the previous step.

Partition the disk (skip this step if the directory already exists)

fdisk /dev/sda

This enters the fdisk shell.

Type n to create a new partition, then press Enter to accept the defaults.

Type w to save and exit the fdisk shell.

Run the following command to tell the kernel to re-read the partition table:

partprobe

For a GPT partition, use the following command to enter the parted shell:

parted /dev/sdb

Convert the disk to GPT format (GPT is only needed for partitions larger than 2 TB):

mktable gpt

Create the partition; 0 -1 puts the whole disk into a single partition:

mkpart primary 0 -1

Print the current partition layout:

print

Exit the parted shell:

quit

Tell the kernel to re-read the partition table:

partprobe
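The same parted steps can also be run non-interactively; a minimal sketch, assuming the device is /dev/sdb as above (mklabel is equivalent to mktable):

parted -s /dev/sdb mklabel gpt mkpart primary 0% 100%
partprobe /dev/sdb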

Format the disk

mkfs.xfs /dev/sdb1    # XFS is only needed on CentOS 7

mkfs.xfs /dev/sda4

Edit the /etc/fstab file and add:

/dev/sda4 /mydata xfs defaults 0 0

/dev/sdb1 /mydata/glusterfs xfs defaults 0 0

Mount everything:

mount -a

Start the glusterd service

systemctl enable glusterd && systemctl start glusterd && systemctl status glusterd

Stop and disable the firewall

systemctl stop firewalld && systemctl disable firewalld

[root@bogon local]# service glusterd start

Starting glusterd: [ OK ]

[root@bogon local]# service glusterd status

glusterd (pid 14553) is running...

[root@bogon local]# service iptables stop

iptables: Setting chains to policy ACCEPT: nat mangle filte[ OK ]

iptables: Flushing firewall rules: [ OK ]

iptables: Unloading modules: [ OK ]

[root@bogon local]# service iptables status

iptables: Firewall is not running.

On the glusterfs23 node, add glusterfs24:

gluster peer probe glusterfs24

[root@bogon local]# gluster peer probe glusterfs2

peer probe: success.

Alternatively, on the glusterfs24 node, add glusterfs23:

gluster peer probe glusterfs23

[root@bogon local]# gluster peer probe glusterfs1

peer probe: success. Host glusterfs1 port 24007 already in peer list

Create the vmdata volume with a replica count of 2

gluster volume create vmdata replica 2 glusterfs{23,24}:/mydata/glusterfs force

Note: glusterfs{23,24} expands to the two machines in this GlusterFS group, glusterfs23 and glusterfs24.
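A quick way to preview what the brace expansion produces before running the command:

# echo glusterfs{23,24}:/mydata/glusterfs
glusterfs23:/mydata/glusterfs glusterfs24:/mydata/glusterfs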

[root@bogon local]# gluster volume create vmdata replica 2 glusterfs{1,2}:/mydata force

volume create: vmdata: success: please start the volume to access data

Start the vmdata volume

gluster volume start vmdata

[root@bogon local]# gluster volume start vmdata

volume start: vmdata: success

Edit /etc/glusterfs/glusterd.vol on both nodes, add the following line, and then restart the glusterd service:

option rpc-auth-allow-insecure on
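For reference, a minimal sketch of what the management block in /etc/glusterfs/glusterd.vol might look like after the change; the surrounding option lines are illustrative defaults and may differ between GlusterFS versions:

volume management
    type mgmt/glusterd
    option working-directory /var/lib/glusterd
    option transport-type socket,rdma
    option rpc-auth-allow-insecure on
end-volume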

Restart the service

systemctl restart glusterd && systemctl status glusterd

[root@bogon glusterfs]# service glusterd restart

Starting glusterd: [ OK ]

Enable the volume's server.allow-insecure option

gluster volume set vmdata server.allow-insecure on

gluster volume set vmdata auth.allow 172.16.*

Restart the service

systemctl restart glusterd && systemctl status glusterd

[root@bogon glusterfs]# gluster volume set vmdata server.allow-insecure on

volume set: success

[root@bogon glusterfs]# gluster volume set vmdata auth.allow 172.16.*

volume set: success

Set the volume's owner uid and gid:

gluster volume set vmdata storage.owner-uid 107

gluster volume set vmdata storage.owner-gid 107

Note: this uid should be the uid of the qemu user (see the lookup example after the output below).

[root@bogon glusterfs]# gluster volume set vmdata storage.owner-uid 0

volume set: success

[root@bogon glusterfs]# gluster volume set vmdata storage.owner-gid 0

volume set: success
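The uid and gid of the qemu user can be checked on each host before setting the values above (107 is common on CentOS but not guaranteed), for example:

id qemu
getent passwd qemu | cut -d: -f3,4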

Stop the volume and start it again:

gluster volume stop vmdata && gluster volume start vmdata

[root@bogon local]# gluster volume stop vmdata && gluster volume start vmdata

Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y

volume stop: vmdata: success

volume start: vmdata: success

Check GlusterFS volume status

gluster volume status

Check GlusterFS peer status (the local node is not shown, only the other nodes)

gluster peer status

Test by creating a KVM virtual disk

qemu-img create -f qcow2 gluster://glusterfs23/vmdata/test.img 40G

#################################### GlusterFS installation is complete at this point ####################################

The qemu-kvm that ships with CentOS 6 (0.12.x) does not support GlusterFS, so a newer qemu has to be built from source.

Install glusterfs-devel before building qemu.

Install the dependency packages:

# rpm -ivh http://download.gluster.org/pub/gluster/glusterfs/3.4/3.4.0/CentOS/epel-6Server/x86_64/glusterfs-devel-3.4.0-8.el6.x86_64.rpm

glusterfs-devel-3.6.9-1.el6.x86_64.rpm was already installed above, so this step can be skipped.

#yum install zlib-devel glib2-devel -y

Build qemu from source, adding the --enable-glusterfs option:

1 wget http://wiki.qemu-project.org/download/qemu-1.5.2.tar.bz2

2 tar jxvf qemu-1.5.2.tar.bz2

# tar jxvf qemu-2.5.0.tar.bz2

3 cd qemu-1.5.2

#cd qemu-2.5.0

4 ./configure --enable-glusterfs    # add the --enable-glusterfs flag here

If configure fails with a CC error, install the GCC compiler first:

#yum install gcc

#yum install autoconf automake libtool

#./configure --enable-glusterfs

#make && make install

Usage

With the environment installed, use qemu-img to create a virtual machine disk:

1 /usr/local/bin/qemu-img create -f qcow2 gluster://192.168.1.100/vm-images/disk1 10G

Create a virtual machine:

qemu-system-x86_64 -enable-kvm -m 1024 -drive file=gluster://192.168.1.100/vm-images/disk1 -vnc :15 -cdrom /data/CentOS-6.4-i386-minimal.iso

You can now connect over VNC and install the operating system.

Configure the libvirt storage pool

In virt-manager, right-click the connected host and open its details (see the figure), enter a storage pool name, and click Forward.

Select the correct filesystem type; here it should be glusterfs, the clustered filesystem.

For the host name, enter glusterfs23 when creating the pool on ser23 and glusterfs24 when creating it on ser24.

The source name is the volume name: vmdata.

The source path is /, which stores the virtual disks at the root of the vmdata volume.
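If the GUI is unavailable, a roughly equivalent pool can be defined from the shell instead. A sketch using virsh, with the pool name, host and source values chosen to match the settings above (whether the gluster pool type is available depends on how libvirt was built):

virsh pool-define-as vmdata gluster --source-host glusterfs23 --source-name vmdata --source-path /
virsh pool-start vmdata
virsh pool-autostart vmdata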

After the pool is created, the GUI only allows deleting volumes; adding a volume does not work from the GUI, presumably due to a driver issue.

Disks therefore have to be created from the host shell with a command such as:

qemu-img create -f qcow2 gluster://172.16.12.48/vmdata/test.img 8G

The -f option specifies the image format.

The final size argument (8G in the command above) is the size of the virtual machine disk.
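To confirm the image really was created on the volume, qemu-img can read it back over the same gluster URL (assuming the qemu-img build includes gluster support):

qemu-img info gluster://172.16.12.48/vmdata/test.img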

Configure mutual SSH trust so that each machine can reach the other's libvirt service.

Run the following command on both ser23 and ser24:

ssh-keygen

Press Enter through all prompts; this generates id_rsa and id_rsa.pub in ~/.ssh/.

Then, on ser23, run:

#cat ~/.ssh/id_rsa.pub >>~/.ssh/authorized_keys

Also append the contents of ~/.ssh/id_rsa.pub on ser24 to ~/.ssh/authorized_keys on ser23:

#ssh 172.16.12.50 cat ~/.ssh/id_rsa.pub >>~/.ssh/authorized_keys    # append the .pub key from the .50 host to the local file

Then copy ~/.ssh/authorized_keys from ser23 to ~/.ssh/ on ser24.
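The same key exchange can usually be done in one step per direction with ssh-copy-id; a sketch using the hostnames from this setup (IP addresses work equally well):

# on ser23
ssh-copy-id root@ser24
# on ser24
ssh-copy-id root@ser23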

Mutual trust is now configured. The first connection still has to be made manually once, to accept the host key.

On ser23, run:

ssh ser24 date

If it asks yes/no but does not ask for a password, everything is fine; type yes and it prints the time on ser24.

On ser24, run:

ssh ser23 date

If it asks yes/no but does not ask for a password, everything is fine; type yes and it prints the time on ser23.

Run the following commands on ser23 and ser24 respectively to verify connectivity between the two libvirt services:

virsh -c qemu+ssh://ser24/system list --all

virsh -c qemu+ssh://ser23/system list --all

#virsh -c qemu+ssh://172.16.12.50/system list --all

If the other host's virtual machines are listed without a password prompt, everything is working.

Next, configure the high-availability failover scripts.

On both ser23 and ser24 the scripts live in the /mydata/sh directory.

It contains the exec, log, tmp and conf directories: exec holds the executable scripts, log holds the run logs of the whole script set, tmp holds temporary files (this directory must not be deleted), and conf and the files under it are currently unused.

Usage

Edit the following three lines in /mydata/sh/exec/kvm_ha.c so they match the current environment:

#define SHBIN "/usr/bin/sh"

#define SHFILE "/mydata/sh/exec/check.sh"

#define SCHOME "/mydata/sh"

SHBIN is the path to sh, i.e. the output of running which sh from bash.

SHFILE is the path of the HA check script.

SCHOME is the home directory of the check scripts.

After these changes, run make in the /mydata/sh/exec directory.

This wrapper exists to make the check-script process more robust: it forks 4 copies of the check script that probe the peer in parallel, improving reliability and responsiveness.

In check.sh, the following two lines must be changed: HA_L is the hostname of the local machine and HA_O is the hostname of the other machine; the two values are swapped between the two hosts.

HA_O="glusterfs24"

HA_L="glusterfs23"

Then start kvm_ha with the following command (both machines need the configuration changes above before starting the HA check script):

./kvm_ha

Run the following command; if the output looks like the figure, everything is normal:

ps -ef|grep `ps -ef|grep kvm_ha|grep -v grep|awk '{print $2}'`

Creating a virtual machine

First create the virtual machine disk file on either host:

qemu-img create -f qcow2 gluster://glusterfs23/vmdata/PAQZ_shengfan.img 260G

Then create the virtual machine with virt-manager.

If you need to add NICs or change advanced settings, tick "Customize configuration before install". For the NIC selection, br1 is the public network and the 172.16.200 segment, and br2 is the 172.16.205 segment.

When the wizard finishes and the virtual machine starts, the HA check script automatically synchronizes the VM definition to the peer server.

Note: if a VM's configuration is changed after creation, make the same change on both machines so the definitions stay identical.

Overview of the HA check script

----------------------------------------- script start ---------------------------------------

#! /bin/bash

SC_HOME=$1

HA_O="glusterfs24"

HA_L="glusterfs23"

VIRSH="/usr/bin/virsh"

HOSTS=$(ls ${SC_HOME}/conf)

################OUT PUT LOG####################

# logging function

log(){

time=$(date "+%Y-%m-%d %H:%M:%S")

echo "[$time] $1" >>${SC_HOME}/log/kvm_ha.log

}

# VM synchronization function

define_vm(){

demo_name=$1

remote_host=$2

if (( $($VIRSH -c qemu+ssh://${HA_O}/system list --all|grep ${demo_name}|wc -l) != 0 ))

then

log "[INFO] The locally running demo ${demo_name} already exists on the peer."

log "[INFO] Skipping define of demo ${demo_name} on the peer."

return 1

fi

stat=0

xml_path="${SC_HOME}/tmp/${demo_name}.xml"

$VIRSH dumpxml ${demo_name} >${xml_path} ;stat=$(($?+$stat))

sed -i s/${HA_L}/${HA_O}/g ${xml_path}

$VIRSH -c qemu+ssh://${HA_O}/system define ${xml_path} ;stat=$(($?+$stat))

if (( $stat != 0 ))

then

log "[ERROR] dumpxml or define vm ERROR."

return 1

fi

rm -rf ${xml_path}

return 0

}

# VM comparison function

vm_equal() {

# Check whether each VM running locally is in the peer's list of shut-off VMs

local_running=$($VIRSH list|grep "running"|awk '{print $2}')

for y in ${local_running}

do

vmin=$(echo "${remote_shutdown}"|grep $y|wc -l)

if (( $vmin == 0 ))

then

# If it is not, call the VM synchronization function to sync it over

define_vm $y;stat=$?

if (( $stat != 0 ))

then

log "[ERROR] define vm $y ERROR."

fi

fi

done

}

# failover function

hosts_failover() {

# Make sure the peer's VMs are completely down by explicitly killing them once

/usr/bin/ssh ${HA_O} pkill -9 qemu-kvm

log "need failover vm is: $1"

log "[INFO] hosts failover ...."

for i in $1

do

log "[INFO] start vm $i ...."

# Failover: start the peer's VMs that are defined locally but currently shut off here

$VIRSH start $i

log "[INFO] start vm $i ....DONE"

done

log "[INFO] hosts failover ....DONE"

log "[INFO] This script will exit; if ${HA_O} is healthy, restart this script."

exit 0

}

# function that checks the peer and fetches its VM lists

get_remote_vm(){

$VIRSH -c qemu+ssh://${HA_O}/system list --all >${SC_HOME}/tmp/${HA_O} ; stat=$?

if (( stat != 0 ))

then

log "[ERROR] can't connect to host ${HA_O}."

if [ "${remote_running}" != "" ]

then

log "[INFO] will run hosts failover."

hosts_failover "${remote_running}"

else

log "[ERROR] first run of this script or no running vm, and can't connect to ${HA_O}"

exit 1

fi

fi

# Lists of the peer's running and shut-off VMs ("關閉" matches the Chinese-locale virsh output for the shut off state)

remote_shutdown=$(cat ${SC_HOME}/tmp/${HA_O} |grep "關閉" |awk '{print $2}')

remote_running=$(cat ${SC_HOME}/tmp/${HA_O} |grep "running" |awk '{print $2}')

}

######################main######################

# main script flow

log "[INFO] ======================start==================="

log "[INFO] kvm_ha started."

# Infinite loop, one pass every 4 seconds

for (( ; ; ))

do

#get remote_shutdown vm and check slave health

# Get the peer's running and shut-off VMs and check its health; if the connection fails, trigger failover directly; if the peer is healthy, continue with the next step

get_remote_vm

#check the vm to rsync

# Check whether each VM running locally exists on the peer; if not, dump its XML from the local host and define it on the peer; otherwise sleep 4 seconds and start the next round

vm_equal

/usr/bin/sleep 4

done

----------------------------------------- script end ---------------------------------------

Reference

https://my.oschina.net/kisops/blog/151073

Issue log

[root@ser23 qemu]# virsh start HDPNameNode1

error: Failed to start domain HDPNameNode1

error: internal error: early end of file from monitor: possible problem:

2016-10-27T14:56:35.200581Z qemu-kvm: -drive file=gluster://glusterfs23/vmdata/HDPNameNode1.img,if=none,id=drive-virtio-disk0,format=qcow2: could not open disk image gluster://glusterfs23/vmdata/HDPNameNode1.img: qcow2: Image is corrupt; cannot be opened read/write

Solution

qemu-img check -r all gluster://glusterfs23/vmdata/HDPNameNode1.img

Issue log

The KVM and libvirt installations on the two nodes must be identical; otherwise a synchronized VM may fail to start on the other machine.

Solution: use the same default installation on both nodes.

Maintenance procedure

1. Create the virtual disk:

qemu-img create -f qcow2 gluster://glusterfs23/vmdata/zbtest.qcow2 30G

2. Create the VM in virt-manager.

3. Export the VM configuration on node 1:

virsh -c qemu+ssh://glusterfs23/system dumpxml zbtest>zbtest.xml

4. Edit the configuration file and change the host references from node 1 to node 2:

vim zbtest.xml

5. From node 1, define the VM configuration on node 2:

virsh -c qemu+ssh://glusterfs24/system define zbtest.xml

6. gluster volume heal vmdata full

When data on one node has been deleted, running the command above re-synchronizes it.
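To watch the progress of the self-heal after triggering it, the heal status can be queried:

gluster volume heal vmdata info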
