
fence+pacemaker


Lab environment:
fence host (foundation4): 172.25.4.250
cluster nodes: server1 (172.25.4.1), server2 (172.25.4.2)

Start the pacemaker cluster services on server1 and server2:

[root@server1 ~]# systemctl start pcsd
[root@server1 ~]# systemctl enable pcsd
Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service.
[root@server2 ~]# systemctl start pcsd
[root@server2 ~]# systemctl enable pcsd
Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service.
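Not shown in this transcript: before pcs cluster start --all can work, the two nodes must already be authenticated to each other and the cluster must have been created. A minimal sketch of that prerequisite step, using the pcs 0.9 syntax these RHEL 7 nodes run and assuming the hacluster account's password and the cluster name mycluster that shows up later in pcs status:

[root@server1 ~]# pcs cluster auth server1 server2 -u hacluster -p <password>
[root@server1 ~]# pcs cluster setup --name mycluster server1 server2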
[root@server1 ~]# pcs cluster start --all
server1: Starting Cluster (corosync)...
server2: Starting Cluster (corosync)...
server1: Starting Cluster (pacemaker)...
server2: Starting Cluster (pacemaker)...

Install the fence packages on server1, server2, and foundation4:

[root@server1 ~]# yum install -y fence-virt.x86_64
[root@server2 ~]# yum install -y fence-virt.x86_64
[root@server1 ~]# pcs stonith list
fence_virt - Fence agent for virtual machines
fence_xvm - Fence agent for virtual machines
[root@foundation4 ~]# yum install -y fence-virtd fence-virtd-libvirt fence-virtd-multicast
[root@foundation4 ~]# rpm -qa|grep fence
fence-virtd-0.4.0-4.el8.x86_64
fence-virtd-multicast-0.4.0-4.el8.x86_64
libxshmfence-1.3-2.el8.x86_64
fence-virtd-libvirt-0.4.0-4.el8.x86_64
[root@foundation4 ~]# fence_virtd -c
Module search path [/usr/lib64/fence-virt]:

Available backends:
    libvirt 0.3
Available listeners:
    multicast 1.2

Listener modules are responsible for accepting requests
from fencing clients.

Listener module [multicast]:

The multicast listener module is designed for use environments
where the guests and hosts may communicate over a network using
multicast.

The multicast address is the address that a client will use to
send fencing requests to fence_virtd.

Multicast IP Address [225.0.0.12]:

Using ipv4 as family.

Multicast IP Port [1229]:

Setting a preferred interface causes fence_virtd to listen only
on that interface.  Normally, it listens on all interfaces.
In environments where the virtual machines are using the host
machine as a gateway, this *must* be set (typically to virbr0).
Set to 'none' for no interface.

Interface [virbr0]: br0

The key file is the shared key information which is used to
authenticate fencing requests.  The contents of this file must
be distributed to each physical host and virtual machine within
a cluster.

Key File [/etc/cluster/fence_xvm.key]:

Backend modules are responsible for routing requests to
the appropriate hypervisor or management layer.

Backend module [libvirt]:

The libvirt backend module is designed for single desktops or
servers.  Do not use in environments where virtual machines
may be migrated between hosts.

Libvirt URI [qemu:///system]:

Configuration complete.

=== Begin Configuration ===
backends {
	libvirt {
		uri = "qemu:///system";
	}
}

listeners {
	multicast {
		port = "1229";
		family = "ipv4";
		interface = "br0";
		address = "225.0.0.12";
		key_file = "/etc/cluster/fence_xvm.key";
	}
}

fence_virtd {
	module_path = "/usr/lib64/fence-virt";
	backend = "libvirt";
	listener = "multicast";
}
=== End Configuration ===
Replace /etc/fence_virt.conf with the above [y/N]? y
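One small step the transcript skips on the host: enable fence_virtd so it comes back after a reboot (a sketch, assuming systemd):

[root@foundation4 ~]# systemctl enable fence_virtd.service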
[root@foundation4 ~]# mkdir /etc/cluster
[root@foundation4 ~]# cd /etc/cluster
[root@foundation4 cluster]# dd if=/dev/urandom of=fence_xvm.key bs=128 count=1
1+0 records in
1+0 records out
128 bytes copied, 6.6503e-05 s, 1.9 MB/s
[root@foundation4 cluster]# du -h fence_xvm.key
4.0K	fence_xvm.key
[root@foundation4 cluster]# file fence_xvm.key
fence_xvm.key: data
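du reports 4.0K only because of the filesystem block size; the key itself is the 128 random bytes dd just wrote. Since it is a shared secret, it is reasonable (though not done in the original session) to tighten its permissions before distributing it:

[root@foundation4 cluster]# chmod 600 /etc/cluster/fence_xvm.key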
[root@foundation4 cluster]# systemctl restart fence_virtd.service 
[root@foundation4 cluster]# netstat -anlpu |grep 1229
udp        0      0 0.0.0.0:1229            0.0.0.0:*                           9615/fence_virtd
[root@foundation4 cluster]# systemctl disable --now firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@foundation4 cluster]# vim /etc/sysconfig/selinux
SELINUX=disabled

On server1 and server2, create /etc/cluster, then copy the key over from the host:

[root@foundation4 cluster]# scp fence_xvm.key root@172.25.4.1:/etc/cluster
fence_xvm.key                                                       100%  128   237.4KB/s   00:00
[root@foundation4 cluster]# scp fence_xvm.key root@172.25.4.2:/etc/cluster
fence_xvm.key                                                       100%  128   237.4KB/s   00:00
[root@server1 ~]# mkdir /etc/cluster
[root@server1 ~]# cd /etc/cluster/
[root@server1 cluster]# ls
fence_xvm.key
[root@server1 cluster]# pcs stonith create vmfence fence_xvm pcmk_host_map="server1:vm1;server2:vm2" op monitor interval=60s
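pcmk_host_map ties each cluster node name to the libvirt domain name the hypervisor knows it by; this lab assumes server1 and server2 run as domains vm1 and vm2. To review the remaining parameters the agent accepts (a quick check, not part of the original session):

[root@server1 cluster]# pcs stonith describe fence_xvm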
[root@server1 cluster]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: server1 (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sat Aug  8 10:59:44 2020
Last change: Sat Aug  8 10:59:39 2020 by root via cibadmin on server1

2 nodes configured
3 resources configured

Online: [ server1 server2 ]

Full list of resources:

 Resource Group: hagroup
     vip	(ocf::heartbeat:IPaddr2):	Started server1
     haproxy	(systemd:haproxy):	Started server1
 vmfence	(stonith:fence_xvm):	Started server2

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled
[root@server1 cluster]# pcs property set stonith-enabled=true
[root@server1 cluster]# crm_verify -LV
[root@server1 cluster]# pcs cluster enable --all
server1: Cluster Enabled
server2: Cluster Enabled
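With STONITH switched on and the cluster services set to start at boot, a quick sanity check that the property took effect and the fence device is configured as expected (a sketch using standard pcs subcommands):

[root@server1 cluster]# pcs property list
[root@server1 cluster]# pcs stonith show vmfence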
[root@server1 cluster]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: server1 (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sat Aug  8 11:02:46 2020
Last change: Sat Aug  8 10:59:39 2020 by root via cibadmin on server1

2 nodes configured
3 resources configured

Online: [ server1 server2 ]

Full list of resources:

 Resource Group: hagroup
     vip	(ocf::heartbeat:IPaddr2):	Started server1
     haproxy	(systemd:haproxy):	Started server1
 vmfence	(stonith:fence_xvm):	Started server2

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled

[root@server2 ~]# fence_xvm -H vm1
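fence_xvm -H vm1, issued from server2, asks fence_virtd on the host to power-cycle the domain vm1, i.e. server1. To exercise the multicast channel and the shared key without rebooting anything, fence_xvm can also just list the domains the host reports (a read-only sketch):

[root@server2 ~]# fence_xvm -o list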
[root@server2 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: server2 (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sat Aug  8 11:12:09 2020
Last change: Sat Aug  8 10:59:39 2020 by root via cibadmin on server1

2 nodes configured
3 resources configured

Online: [ server2 ]
OFFLINE: [ server1 ]

Full list of resources:

 Resource Group: hagroup
     vip	(ocf::heartbeat:IPaddr2):	Started server2
     haproxy	(systemd:haproxy):	Started server2
 vmfence	(stonith:fence_xvm):	Started server2

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled

After being fenced, server1 boots back up on its own and rejoins the cluster:

[root@server2 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: server2 (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sat Aug  8 11:14:01 2020
Last change: Sat Aug  8 10:59:39 2020 by root via cibadmin on server1

2 nodes configured
3 resources configured

Online: [ server1 server2 ]

Full list of resources:

 Resource Group: hagroup
     vip	(ocf::heartbeat:IPaddr2):	Started server2
     haproxy	(systemd:haproxy):	Started server2
 vmfence	(stonith:fence_xvm):	Started server1

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled

The vmfence resource has moved back to server1 because the fence resource always runs on the peer node: when something goes wrong, each node believes the other is at fault and asks fence to reboot its peer. A dead server1 cannot reach fence, but server2 still can, so it is server1 that gets rebooted.

Now crash the kernel on server2:

[root@server2 ~]# echo c > /proc/sysrq-trigger

server2 is fenced and comes back up on its own:

[root@server1 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: server1 (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sat Aug  8 11:22:49 2020
Last change: Sat Aug  8 10:59:39 2020 by root via cibadmin on server1

2 nodes configured
3 resources configured

Online: [ server1 server2 ]

Full list of resources:

 Resource Group: hagroup
     vip	(ocf::heartbeat:IPaddr2):	Started server1
     haproxy	(systemd:haproxy):	Started server1
 vmfence	(stonith:fence_xvm):	Started server2

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled

[root@server1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:5a:95:fe brd ff:ff:ff:ff:ff:ff
    inet 172.25.4.1/24 brd 172.25.4.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.25.4.100/24 brd 172.25.4.255 scope global secondary eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe5a:95fe/64 scope link
       valid_lft forever preferred_lft forever
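The VIP 172.25.4.100 is back on server1 together with haproxy, so client traffic should follow the failover transparently. A quick client-side probe, assuming haproxy is listening for HTTP on port 80 (not verified in this transcript):

[root@foundation4 ~]# curl -I http://172.25.4.100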