Home > Workload Solutions > SAP > Guides > DVD for High Availability with Red Hat Pacemaker Clusters Running SAP HANA on Dell S5000 Series Servers > Configuring the cluster
Before you start, configure the hdbuserstore for a backup user. This design guide uses "backup" as the username. On both hosts, run the following commands:
# su - th0adm
# hdbuserstore -i SET backup localhost:30013@SYSTEMDB system
Note: For data protection reasons, create a backup user with the appropriate permissions on your databases. You can also use the SYSTEM permissions.
To configure the SAP HANA replication:
hana01:# su - th0adm
# hdbsql -i 00 -U backup -d SYSTEMDB "BACKUP DATA USING FILE ('/hana/shared/th0_sys')"
# hdbsql -i 00 -U backup -d SYSTEMDB "BACKUP DATA FOR TH0 USING FILE ('/hana/shared/th0_data')"
Note: Ensure that the backup file destination has enough free space available and is writable for the th0adm user.
th0adm@hana01:# hdbnsutil -sr_enable --name=Node1
hana02:# su - th0adm
th0adm@hana02:# HDB stop
hana02:# scp root@hana01:/usr/sap/TH0/SYS/global/security/rsecssfs/key/SSFS_TH0.KEY /usr/sap/TH0/SYS/global/security/rsecssfs/key/SSFS_TH0.KEY
hana02:# scp root@hana01:/usr/sap/TH0/SYS/global/security/rsecssfs/data/SSFS_TH0.DAT /usr/sap/TH0/SYS/global/security/rsecssfs/data/SSFS_TH0.DAT
hana02:# su - th0adm
th0adm@hana02:# hdbnsutil -sr_register --remoteHost=hana01 --remoteInstance=00 --replicationMode=syncmem --name=Node2
hana02:# su - th0adm
th0adm@hana02:# HDB start
hana01:# su - th0adm
th0adm@hana01:# cdpy
th0adm@hana01:# python systemReplicationStatus.py
The command output confirms that the replication was successful, as shown in the following figure:
To configure Pacemaker in the cluster:
# yum -y install pcs pacemaker fence-agents-ipmilan resource-agents-sap-hana
# passwd hacluster
[enter a password for the user hacluster]
# systemctl enable pcsd.service; systemctl start pcsd.service
# pcs host auth hana01 hana02
Username: hacluster
Password: [enter a password for the user hacluster]
# pcs cluster setup clhana hana01 hana02
# pcs cluster start --all
# pcs cluster enable --all
hana01:# pcs stonith create Hana01Stonith fence_ipmilan pcmk_host_list=hana01 ip=192.168.9.100 username=sap-ipmi password=<ipmi user password> lanplus=1 op monitor interval=30s
hana01:# pcs constraint location Hana01Stonith avoids hana01
hana01:# pcs stonith create Hana02Stonith fence_ipmilan pcmk_host_list=hana02 ip=192.168.9.101 username=sap-ipmi password=<ipmi user password> lanplus=1 op monitor interval=30s
hana01:# pcs constraint location Hana02Stonith avoids hana02
hana01:# su - th0adm
th0adm@hana01:# HDB stop
hana02:# su - th0adm
th0adm@hana02:# HDB stop
[root]# mkdir -p /hana/shared/myHooks
[root]# cp /usr/share/SAPHanaSR/srHook/SAPHanaSR.py /hana/shared/myHooks
[root]# chown -R th0adm:sapsys /hana/shared/myHooks
[ha_dr_provider_SAPHanaSR]
provider = SAPHanaSR
path = /hana/shared/myHooks
execution_order = 1
[trace]
ha_dr_saphanasr = info
Cmnd_Alias NODE1_SOK = /usr/sbin/crm_attribute -n hana_th0_site_srHook_Node1 -v SOK -t crm_config -s SAPHanaSR
Cmnd_Alias NODE1_SFAIL = /usr/sbin/crm_attribute -n hana_th0_site_srHook_Node1 -v SFAIL -t crm_config -s SAPHanaSR
Cmnd_Alias NODE2_SOK = /usr/sbin/crm_attribute -n hana_th0_site_srHook_Node2 -v SOK -t crm_config -s SAPHanaSR
Cmnd_Alias NODE2_SFAIL = /usr/sbin/crm_attribute -n hana_th0_site_srHook_Node2 -v SFAIL -t crm_config -s SAPHanaSR
th0adm ALL=(ALL) NOPASSWD: NODE1_SOK, NODE1_SFAIL, NODE2_SOK, NODE2_SFAIL
Defaults!NODE1_SOK, NODE1_SFAIL, NODE2_SOK, NODE2_SFAIL !requiretty
Note: Replace the th0 values with the lowercase SAP SID of your database. Replace Node1 and Node2 with the site names that you provided in the replication setup.
hana01:# chmod 0440 /etc/sudoers.d/20-saphana
hana02:# chmod 0440 /etc/sudoers.d/20-saphana
hana01:# su - th0adm
th0adm@hana01:# HDB start
hana02:# su - th0adm
th0adm@hana02:# HDB start
# pcs property set maintenance-mode=true
# pcs resource defaults update resource-stickiness=1000
# pcs resource defaults update migration-threshold=5000
# pcs resource create SAPHanaTopology_TH0_00 SAPHanaTopology SID=TH0 InstanceNumber=00 \
op start timeout=600 \
op stop timeout=300 \
op monitor interval=10 timeout=600 \
clone clone-max=2 clone-node-max=1 interleave=true
# pcs resource create SAPHana_TH0_00 SAPHana SID=TH0 InstanceNumber=00 \
PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=true \
op start timeout=3600 \
op stop timeout=3600 \
op monitor interval=61 role="Slave" timeout=700 \
op monitor interval=59 role="Master" timeout=700 \
op promote timeout=3600 \
op demote timeout=3600 \
promotable notify=true clone-max=2 clone-node-max=1 interleave=true
# pcs resource create vip_TH0_00 IPaddr2 ip="10.14.20.10"
# pcs constraint order SAPHanaTopology_TH0_00-clone then SAPHana_TH0_00-clone symmetrical=false
# pcs constraint colocation add vip_TH0_00 with master SAPHana_TH0_00-clone 2000
# pcs property set maintenance-mode=false
hana01:~ # crm_mon -r1
Status of pacemakerd: 'Pacemaker is running' (last updated 2023-12-15 10:42:58 -05:00)
Cluster Summary:
* Stack: corosync
* Current DC: hana01 (version 2.1.5-9.3.el8_8-a3f44794f94) - partition with quorum
* Last updated: Fri Dec 15 10:42:59 2023
* Last change: Fri Dec 15 10:42:54 2023 by root via crm_attribute on hana01
* 2 nodes configured
* 7 resource instances configured
Node List:
* Online: [ hana01 hana02 ]
Full List of Resources:
* Hana01Stonith (stonith:fence_ipmilan): Started hana02
* Hana02Stonith (stonith:fence_ipmilan): Started hana01
* Clone Set: SAPHanaTopology_TH0_00-clone [SAPHanaTopology_TH0_00]:
* Started: [ hana01 hana02 ]
* Clone Set: SAPHana_TH0_00-clone [SAPHana_TH0_00] (promotable):
* Masters: [ hana01 ]
* Slaves: [ hana02 ]
* vip_TH0_00 (ocf::heartbeat:IPaddr2): Started hana01
hana01:# su - th0adm
th0adm@hana01:# cdpy
th0adm@hana01:# python systemReplicationStatus.py
The following figure shows the command output: