<?xml version="1.0" encoding="UTF-8"?>
<!--
****************************************************************************
** Licensed Materials - Property of IBM
**
** Governed under the terms of the International
** License Agreement for Non-Warranted Sample Code.
**
** (C) COPYRIGHT International Business Machines Corp. 2007
** All Rights Reserved.
**
** US Government Users Restricted Rights - Use, duplication or
** disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
*****************************************************************************
**
** SOURCE FILE NAME: db2ha_sample_HADR.xml
**
** SAMPLE: Initial setup configuration for DB2 HADR failover automation using db2haicu
**
** FUNCTION: This sample shows how to write an XML configuration file
** for HADR failover automation using the db2haicu HA utility
**
** USAGE: db2haicu -f db2ha_sample_HADR.xml
**
** DESCRIPTION: The environment for this sample is described below:
**
** 1. The physical topology is already set up and available
**    The hardware topology includes the following:
**    - 2 computers (hasys01, hasys02)
**    - 2 NICs per machine (eth0 public and eth1 private on both systems)
**    - 1 Virtual IP address
** 2. DB2 is installed on both nodes (hasys01 and hasys02)
** 3. The cluster manager is installed and running on both nodes (hasys01 and hasys02)
** 4. The instance (db2inst1) exists on both nodes; the HADR database (HADRDB)
**    runs as primary on hasys01 with a standby copy on hasys02
**
** PREREQUISITES:
** 1. Hardware installed and configured (physical networks)
** 2. IP addresses reserved or assigned
** 3. OS is installed, patched and configured
** 4. Users, groups and authentication set-up on both machines
** 5. TSA v2.2 installed and configured on both nodes
** 6. Hardware information such as IP addresses and NIC names gathered
** 7. Root privilege available (required to install DB2)
**
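** EXAMPLE: a minimal sketch of the surrounding commands. HADR must
** already be configured and started before db2haicu is run; the host,
** database, and instance names below are this sample's values, and
** the sequence should be adapted to your environment:
**
**   db2 update db cfg for HADRDB using HADR_LOCAL_HOST hasys01
**   db2 update db cfg for HADRDB using HADR_REMOTE_HOST hasys02
**   db2 start hadr on db HADRDB as standby     (on hasys02)
**   db2 start hadr on db HADRDB as primary     (on hasys01)
**
** Then run db2haicu with this file on the standby node first,
** followed by the primary node:
**   db2haicu -f db2ha_sample_HADR.xml
**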
*****************************************************************************
*****************************************************************************
**
** For detailed information about database backup and database recovery, see
** the Data Recovery and High Availability Guide and Reference. This manual
** will help you to determine which database and table space recovery methods
** are best suited to your business environment.
**
** For more information on the sample programs, see the README file.
**
** For information on developing C applications, see the Application
** Development Guide.
**
** For information on using SQL statements, see the SQL Reference.
**
** For information on DB2 APIs, see the Administrative API Reference.
**
** For the latest information on programming, building, and running DB2
** applications, visit the DB2 application development website:
** http://www.software.ibm.com/data/db2/udb/ad
****************************************************************************
-->
<!-- ================================================================= -->
<!-- = DB2 High Availability configuration schema = -->
<!-- = Schema describes the elements of DB2 High Availability = -->
<!-- = that are used in the configuration of a HA cluster = -->
<!-- ================================================================= -->
<DB2Cluster xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="db2ha.xsd" clusterManagerName="TSA" version="1.0">
<!-- ================================================================= -->
<!-- = ClusterDomain element = -->
<!-- = This element encapsulates the cluster configuration = -->
<!-- = specification = -->
<!-- = Creating cluster domain of name db2HAdomain = -->
<!-- = Creating an IP quorum device (IP 19.126.4.5) = -->
<!-- = The IP must be pingable at all times by each of the nodes in = -->
<!-- = the cluster domain = -->
<!-- ================================================================= -->
<ClusterDomain domainName="db2HAdomain">
<Quorum quorumDeviceProtocol="network" quorumDeviceName="19.126.4.5"/>
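<!-- ================================================================= -->
<!-- = Example (illustrative shell command, not db2haicu input): = -->
<!-- = a quick check that the quorum IP answers from every node = -->
<!-- = before the domain is created might look like: = -->
<!-- = ping -c 3 19.126.4.5 (run on both hasys01 and hasys02) = -->
<!-- ================================================================= -->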
<!-- ================================================================= -->
<!-- = Physical network element = -->
<!-- = The physical network specifies the network type, protocol = -->
<!-- = IP address, subnet mask, and NIC name = -->
<!-- = Define two logical groupings of NICs = -->
<!-- ================================================================= -->
<PhysicalNetwork physicalNetworkName="db2_public_network_0" physicalNetworkProtocol="ip">
<Interface interfaceName="en0" clusterNodeName="hasys01">
<IPAddress baseAddress="19.126.52.139" subnetMask="255.255.255.0" networkName="db2_public_network_0"/>
</Interface>
<Interface interfaceName="en0" clusterNodeName="hasys02">
<IPAddress baseAddress="19.126.52.140" subnetMask="255.255.255.0" networkName="db2_public_network_0"/>
</Interface>
</PhysicalNetwork>
<PhysicalNetwork physicalNetworkName="db2_private_network_0" physicalNetworkProtocol="ip">
<Interface interfaceName="eth1" clusterNodeName="hasys01">
<IPAddress baseAddress="192.168.23.101" subnetMask="255.255.255.0" networkName="db2_private_network_0"/>
</Interface>
<Interface interfaceName="eth1" clusterNodeName="hasys02">
<IPAddress baseAddress="192.168.23.102" subnetMask="255.255.255.0" networkName="db2_private_network_0"/>
</Interface>
</PhysicalNetwork>
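<!-- ================================================================= -->
<!-- = Example (illustrative, assumes Linux hosts): the interface = -->
<!-- = addresses above can be verified on each node with: = -->
<!-- = ip addr show eth0 = -->
<!-- = ip addr show eth1 = -->
<!-- ================================================================= -->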
<!-- ================================================================= -->
<!-- = ClusterNodeName element = -->
<!-- = The set of nodes in the cluster domain = -->
<!-- = Here the defined set of nodes in the domain is = -->
<!-- = hasys01, hasys02 = -->
<!-- ================================================================= -->
<ClusterNode clusterNodeName="hasys01"/>
<ClusterNode clusterNodeName="hasys02"/>
</ClusterDomain>
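<!-- ================================================================= -->
<!-- = Example (illustrative RSCT commands): once db2haicu has = -->
<!-- = created the domain, it can be inspected with: = -->
<!-- = lsrpdomain (domain name and operational state) = -->
<!-- = lsrpnode (nodes that have joined the domain) = -->
<!-- ================================================================= -->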
<!-- ================================================================= -->
<!-- = Failover policy element = -->
<!-- = The failover policy specifies the failover order of the = -->
<!-- = cluster nodes = -->
<!-- = In this sample the failover policy is HADR failover = -->
<!-- = (HADRFailover): the standby database takes over on failure = -->
<!-- ================================================================= -->
<FailoverPolicy>
<HADRFailover></HADRFailover>
</FailoverPolicy>
<!-- ================================================================= -->
<!-- = DB2 Partition element = -->
<!-- = The DB2 partition element specifies the DB2 instance name = -->
<!-- = and the partition number = -->
<!-- ================================================================= -->
<DB2PartitionSet>
<DB2Partition dbpartitionnum="0" instanceName="db2inst1"/>
</DB2PartitionSet>
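<!-- ================================================================= -->
<!-- = Example (illustrative): the partition number used above can = -->
<!-- = be confirmed from the db2nodes.cfg file of the instance: = -->
<!-- = cat ~db2inst1/sqllib/db2nodes.cfg = -->
<!-- ================================================================= -->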
<!-- ================================================================= -->
<!-- = HADRDBSet = -->
<!-- = Set of HADR Databases for this instance = -->
<!-- = Specify the databaseName, the local instance that controls = -->
<!-- = the HADR database on this machine, the remote instance of = -->
<!-- = the HADR pair, and the local and remote hostnames of the = -->
<!-- = two HADR partners = -->
<!-- ================================================================= -->
<HADRDBSet>
<HADRDB databaseName="HADRDB" localInstance="db2inst1" remoteInstance="db2inst1" localHost="hasys01" remoteHost="hasys02" />
</HADRDBSet>
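<!-- ================================================================= -->
<!-- = Example (illustrative commands, not part of this file): once = -->
<!-- = db2haicu has processed this configuration, the HADR pair and = -->
<!-- = the cluster resources can be inspected with: = -->
<!-- = db2pd -hadr -db HADRDB (HADR role and state) = -->
<!-- = lssam (TSA resource group status) = -->
<!-- ================================================================= -->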
</DB2Cluster>