run_check_repl_dbhash.js
// Runner for ReplSetTest.checkReplicatedDataHashes() that runs the dbhash command on all replica
// set nodes to ensure all nodes have the same data.
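//
// This script expects the shell to already be connected to the cluster under test (see the `db`
// assertion below); it does not start any servers itself. It is typically loaded as a
// data-consistency hook once a test has finished running against the cluster.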
import "jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js";
import {DiscoverTopology, Topology} from "jstests/libs/discover_topology.js";
import {Thread} from "jstests/libs/parallelTester.js";
import {ReplSetTest} from "jstests/libs/replsettest.js";
(function() {
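// Worker body run by each parallelTester Thread. A Thread executes in a fresh shell scope, so
// ReplSetTest is re-imported dynamically here, and failures are returned as {ok: 0, ...}
// documents rather than thrown so the main thread can join every worker before reporting.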
async function checkReplicatedDataHashesThread(hosts) {
    const {ReplSetTest} = await import("jstests/libs/replsettest.js");
    try {
        const excludedDBs = jsTest.options().excludedDBsFromDBHash;
        const rst = new ReplSetTest(hosts[0]);
        rst.checkReplicatedDataHashes(undefined, excludedDBs);
        return {ok: 1};
    } catch (e) {
        return {ok: 0, hosts: hosts, error: e.toString(), stack: e.stack};
    }
}
const startTime = Date.now();
assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
let skipped = false;
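// Discover what the shell is connected to and run the appropriate consistency check. Standalone
// nodes and 1-node replica sets have no other node to compare against, so they are skipped.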
try {
    const conn = db.getMongo();
    const topology = DiscoverTopology.findConnectedNodes(conn);

    if (topology.type === Topology.kStandalone) {
        print('Skipping data consistency checks for cluster because we are connected to a' +
              ' stand-alone mongod: ' + tojsononeline(topology));
        skipped = true;
        return;
    }

    if (topology.type === Topology.kReplicaSet) {
        if (topology.nodes.length === 1) {
            print('Skipping data consistency checks for cluster because we are connected to a' +
                  ' 1-node replica set: ' + tojsononeline(topology));
            skipped = true;
            return;
        }

        const excludedDBs = jsTest.options().excludedDBsFromDBHash;
        new ReplSetTest(topology.nodes[0]).checkReplicatedDataHashes(undefined, excludedDBs);
        return;
    }

    if (topology.type !== Topology.kShardedCluster) {
        throw new Error('Unrecognized topology format: ' + tojson(topology));
    }
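
    // Sharded cluster: check the CSRS and every replica-set shard in parallel, one Thread per
    // replica set. The Thread handles are collected so they can all be joined in the finally
    // block below.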
    const threads = [];
    try {
        if (topology.configsvr.nodes.length > 1) {
            const thread = new Thread(checkReplicatedDataHashesThread, topology.configsvr.nodes);
            threads.push(thread);
            thread.start();
        } else {
            print('Skipping data consistency checks for 1-node CSRS: ' + tojsononeline(topology));
        }
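
        // Check each shard. Standalone shards and 1-node replica-set shards are skipped for the
        // same reason as above: there is no other node to compare against.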
        for (let shardName of Object.keys(topology.shards)) {
            const shard = topology.shards[shardName];

            if (shard.type === Topology.kStandalone) {
                print('Skipping data consistency checks for stand-alone shard: ' +
                      tojsononeline(topology));
                continue;
            }

            if (shard.type !== Topology.kReplicaSet) {
                throw new Error('Unrecognized topology format: ' + tojson(topology));
            }

            if (shard.nodes.length > 1) {
                const thread = new Thread(checkReplicatedDataHashesThread, shard.nodes);
                threads.push(thread);
                thread.start();
            } else {
                print('Skipping data consistency checks for 1-node replica set shard: ' +
                      tojsononeline(topology));
            }
        }
    } finally {
        // Wait for each thread to finish. Throw an error if any thread fails.
        const returnData = threads.map(thread => {
            thread.join();
            return thread.returnData();
        });

        returnData.forEach(res => {
            assert.commandWorked(res, 'data consistency checks failed');
        });
    }
} finally {
    if (!skipped) {
        const totalTime = Date.now() - startTime;
        print('Finished data consistency checks for cluster in ' + totalTime + ' ms.');
    }
}
})();