
Commit 1804a31
Author: Inigo Goiri
HDFS-13536. [PROVIDED Storage] HA for InMemoryAliasMap. Contributed by Virajith Jalaparti.
1 parent 5cc2541 · commit 1804a31

File tree: 16 files changed, +615 -110 lines changed

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java (+2 -2)

@@ -396,7 +396,7 @@ static String concatSuffixes(String... suffixes) {
    * @param keys Set of keys to look for in the order of preference
    * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
    */
-  static Map<String, Map<String, InetSocketAddress>> getAddresses(
+  public static Map<String, Map<String, InetSocketAddress>> getAddresses(
       Configuration conf, String defaultAddress, String... keys) {
     Collection<String> nameserviceIds = getNameServiceIds(conf);
     return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
@@ -426,7 +426,7 @@ static Map<String, Map<String, InetSocketAddress>> getAddressesForNsIds(
     return ret;
   }

-  static Map<String, InetSocketAddress> getAddressesForNameserviceId(
+  public static Map<String, InetSocketAddress> getAddressesForNameserviceId(
       Configuration conf, String nsId, String defaultValue, String... keys) {
     Collection<String> nnIds = getNameNodeIds(conf, nsId);
     Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java (+3)

@@ -184,6 +184,9 @@ public interface HdfsClientConfigKeys {
       "dfs.namenode.snapshot.capture.openfiles";
   boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;

+  String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS =
+      "dfs.provided.aliasmap.inmemory.dnrpc-address";
+
   /**
    * These are deprecated config keys to client code.
    */

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java (+8 -1)

@@ -37,6 +37,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+
 /**
  * A FailoverProxyProvider implementation which allows one to configure
  * multiple URIs to connect to during fail-over. A random configured address is
@@ -60,6 +62,11 @@ public class ConfiguredFailoverProxyProvider<T> extends

   public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
       Class<T> xface, HAProxyFactory<T> factory) {
+    this(conf, uri, xface, factory, DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+
+  public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
+      Class<T> xface, HAProxyFactory<T> factory, String addressKey) {
     this.xface = xface;
     this.conf = new Configuration(conf);
     int maxRetries = this.conf.getInt(
@@ -81,7 +88,7 @@ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
     ugi = UserGroupInformation.getCurrentUser();

     Map<String, Map<String, InetSocketAddress>> map =
-        DFSUtilClient.getHaNnRpcAddresses(conf);
+        DFSUtilClient.getAddresses(conf, null, addressKey);
     Map<String, InetSocketAddress> addressesInNN = map.get(uri.getHost());

     if (addressesInNN == null || addressesInNN.size() == 0) {
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/InMemoryAliasMapFailoverProxyProvider.java (new file, +38)

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.net.URI;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
+
+/**
+ * A {@link ConfiguredFailoverProxyProvider} implementation used to connect
+ * to an InMemoryAliasMap.
+ */
+public class InMemoryAliasMapFailoverProxyProvider<T>
+    extends ConfiguredFailoverProxyProvider<T> {
+
+  public InMemoryAliasMapFailoverProxyProvider(
+      Configuration conf, URI uri, Class<T> xface, HAProxyFactory<T> factory) {
+    super(conf, uri, xface, factory,
+        DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS);
+  }
+}
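For reference, this provider is selected through the standard client failover proxy provider key for a nameservice. A hedged sketch of that wiring, assuming a hypothetical nameservice id ns1; the init() method added to InMemoryAliasMapProtocolClientSideTranslatorPB later in this commit sets the same key programmatically when HA is enabled:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.ha.InMemoryAliasMapFailoverProxyProvider;

// Sketch only: "ns1" is a hypothetical nameservice id, not part of this commit.
public class AliasMapProxyProviderWiringSketch {
  public static void wire(Configuration conf) {
    // dfs.client.failover.proxy.provider.<nsId> selects the provider used when
    // creating an HA proxy for that nameservice.
    conf.setClass("dfs.client.failover.proxy.provider.ns1",
        InMemoryAliasMapFailoverProxyProvider.class,
        AbstractNNFailoverProxyProvider.class);
  }
}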

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (+4 -1)

@@ -86,8 +86,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
   public static final String DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
   public static final String DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.backup.dnrpc-address";
-  public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS = "dfs.provided.aliasmap.inmemory.dnrpc-address";
+  public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS =
+      HdfsClientConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT = "0.0.0.0:50200";
+  public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST = "dfs.provided.aliasmap.inmemory.rpc.bind-host";
+
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR = "dfs.provided.aliasmap.inmemory.leveldb.dir";
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE = "dfs.provided.aliasmap.inmemory.batch-size";
   public static final int DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE_DEFAULT = 500;
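In an HA deployment these keys would typically be suffixed per nameservice and NameNode, which is the form DFSUtil.getBindAddress (added below) and the client-side init() expect. A minimal sketch, assuming a hypothetical nameservice ns1 with NameNodes nn1/nn2 and made-up hostnames (none of these identifiers come from this commit):

import org.apache.hadoop.conf.Configuration;

// Sketch only: ns1, nn1, nn2 and the hostnames are illustrative, not from this commit.
public class AliasMapHaConfigSketch {
  public static Configuration example() {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "ns1");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
    // DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS suffixed with <nsId>.<nnId>.
    conf.set("dfs.provided.aliasmap.inmemory.dnrpc-address.ns1.nn1",
        "nn1.example.com:50200");
    conf.set("dfs.provided.aliasmap.inmemory.dnrpc-address.ns1.nn2",
        "nn2.example.com:50200");
    // Optional: DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST, suffixed the same way.
    conf.set("dfs.provided.aliasmap.inmemory.rpc.bind-host.ns1.nn1", "0.0.0.0");
    return conf;
  }
}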

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (+36 -1)

@@ -1130,7 +1130,42 @@ private static String getNameServiceId(Configuration conf, String addressKey) {

     return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
   }
-
+
+  /**
+   * Determine the {@link InetSocketAddress} to bind to, for any service.
+   * In case of HA or federation, the address is assumed to be specified as
+   * {@code confKey}.NAMESPACEID.NAMENODEID as appropriate.
+   *
+   * @param conf configuration.
+   * @param confKey configuration key (prefix if HA/federation) used to
+   *        specify the address for the service.
+   * @param defaultValue default value for the address.
+   * @param bindHostKey configuration key (prefix if HA/federation)
+   *        specifying host to bind to.
+   * @return the address to bind to.
+   */
+  public static InetSocketAddress getBindAddress(Configuration conf,
+      String confKey, String defaultValue, String bindHostKey) {
+    InetSocketAddress address;
+    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+    String bindHostActualKey;
+    if (nsId != null) {
+      String namenodeId = HAUtil.getNameNodeId(conf, nsId);
+      address = DFSUtilClient.getAddressesForNameserviceId(
+          conf, nsId, null, confKey).get(namenodeId);
+      bindHostActualKey = DFSUtil.addKeySuffixes(bindHostKey, nsId, namenodeId);
+    } else {
+      address = NetUtils.createSocketAddr(conf.get(confKey, defaultValue));
+      bindHostActualKey = bindHostKey;
+    }
+
+    String bindHost = conf.get(bindHostActualKey);
+    if (bindHost == null || bindHost.isEmpty()) {
+      bindHost = address.getHostName();
+    }
+    return new InetSocketAddress(bindHost, address.getPort());
+  }
+
   /**
    * Returns nameservice Id and namenode Id when the local host matches the
    * configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
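The new getBindAddress helper resolves the per-NameNode address (confKey.<nsId>.<nnId> when HA or federation is configured, otherwise the plain key) and then overrides the host with the bind-host key if one is set. A sketch of how a server-side component might call it for the in-memory alias map; the wrapper class here is hypothetical and not part of the commit:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;

// Sketch only: illustrates the new DFSUtil.getBindAddress call with the
// in-memory alias map keys added in this change.
public class AliasMapBindAddressSketch {
  public static InetSocketAddress resolve(Configuration conf) {
    return DFSUtil.getBindAddress(conf,
        DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
        DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT,
        DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST);
  }
}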

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java (+14 -1)

@@ -31,10 +31,13 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocolPB.AliasMapProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.InMemoryAliasMapProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.ha.NameNodeHAProxyFactory;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -184,6 +187,8 @@ public static <T> ProxyAndInfo<T> createNonHAProxy(
           conf, ugi);
     } else if (xface == RefreshCallQueueProtocol.class) {
       proxy = (T) createNNProxyWithRefreshCallQueueProtocol(nnAddr, conf, ugi);
+    } else if (xface == InMemoryAliasMapProtocol.class) {
+      proxy = (T) createNNProxyWithInMemoryAliasMapProtocol(nnAddr, conf, ugi);
     } else {
       String message = "Unsupported protocol found when creating the proxy " +
           "connection to NameNode: " +
@@ -194,7 +199,15 @@ public static <T> ProxyAndInfo<T> createNonHAProxy(

     return new ProxyAndInfo<T>(proxy, dtService, nnAddr);
   }
-
+
+  private static InMemoryAliasMapProtocol createNNProxyWithInMemoryAliasMapProtocol(
+      InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
+      throws IOException {
+    AliasMapProtocolPB proxy = (AliasMapProtocolPB) createNameNodeProxy(
+        address, conf, ugi, AliasMapProtocolPB.class, 30000);
+    return new InMemoryAliasMapProtocolClientSideTranslatorPB(proxy);
+  }
+
   private static JournalProtocol createNNProxyWithJournalProtocol(
       InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
       throws IOException {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java (+75 -20)

@@ -20,27 +20,38 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
 import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
 import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.hdfs.server.namenode.ha.InMemoryAliasMapFailoverProxyProvider;
 import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import javax.annotation.Nonnull;
+import java.io.Closeable;
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Optional;
 import java.util.stream.Collectors;

 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSUtil.addKeySuffixes;
+import static org.apache.hadoop.hdfs.DFSUtil.createUri;
+import static org.apache.hadoop.hdfs.DFSUtilClient.getNameServiceIds;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.*;
 import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.*;

@@ -52,30 +63,69 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class InMemoryAliasMapProtocolClientSideTranslatorPB
-    implements InMemoryAliasMapProtocol {
+    implements InMemoryAliasMapProtocol, Closeable {

   private static final Logger LOG =
       LoggerFactory
           .getLogger(InMemoryAliasMapProtocolClientSideTranslatorPB.class);

   private AliasMapProtocolPB rpcProxy;

-  public InMemoryAliasMapProtocolClientSideTranslatorPB(Configuration conf) {
-    String addr = conf.getTrimmed(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
-        DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT);
-    InetSocketAddress aliasMapAddr = NetUtils.createSocketAddr(addr);
+  public InMemoryAliasMapProtocolClientSideTranslatorPB(
+      AliasMapProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }

-    RPC.setProtocolEngine(conf, AliasMapProtocolPB.class,
-        ProtobufRpcEngine.class);
-    LOG.info("Connecting to address: " + addr);
-    try {
-      rpcProxy = RPC.getProxy(AliasMapProtocolPB.class,
-          RPC.getProtocolVersion(AliasMapProtocolPB.class), aliasMapAddr, null,
-          conf, NetUtils.getDefaultSocketFactory(conf), 0);
-    } catch (IOException e) {
-      throw new RuntimeException(
-          "Error in connecting to " + addr + " Got: " + e);
+  public static Collection<InMemoryAliasMapProtocol> init(Configuration conf) {
+    Collection<InMemoryAliasMapProtocol> aliasMaps = new ArrayList<>();
+    // Try to connect to all configured nameservices as it is not known which
+    // nameservice supports the AliasMap.
+    for (String nsId : getNameServiceIds(conf)) {
+      try {
+        URI namenodeURI = null;
+        Configuration newConf = new Configuration(conf);
+        if (HAUtil.isHAEnabled(conf, nsId)) {
+          // set the failover-proxy provider if HA is enabled.
+          newConf.setClass(
+              addKeySuffixes(PROXY_PROVIDER_KEY_PREFIX, nsId),
+              InMemoryAliasMapFailoverProxyProvider.class,
+              AbstractNNFailoverProxyProvider.class);
+          namenodeURI = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
+        } else {
+          String key =
+              addKeySuffixes(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, nsId);
+          String addr = conf.get(key);
+          if (addr != null) {
+            namenodeURI = createUri(HdfsConstants.HDFS_URI_SCHEME,
+                NetUtils.createSocketAddr(addr));
+          }
+        }
+        if (namenodeURI != null) {
+          aliasMaps.add(NameNodeProxies
+              .createProxy(newConf, namenodeURI, InMemoryAliasMapProtocol.class)
+              .getProxy());
+          LOG.info("Connected to InMemoryAliasMap at {}", namenodeURI);
+        }
+      } catch (IOException | URISyntaxException e) {
+        LOG.warn("Exception in connecting to InMemoryAliasMap for nameservice "
+            + "{}: {}", nsId, e);
+      }
     }
+    // if a separate AliasMap is configured using
+    // DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, try to connect to it.
+    if (conf.get(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS) != null) {
+      URI uri = createUri("hdfs", NetUtils.createSocketAddr(
+          conf.get(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS)));
+      try {
+        aliasMaps.add(NameNodeProxies
+            .createProxy(conf, uri, InMemoryAliasMapProtocol.class).getProxy());
+        LOG.info("Connected to InMemoryAliasMap at {}", uri);
+      } catch (IOException e) {
+        LOG.warn("Exception in connecting to InMemoryAliasMap at {}: {}", uri,
+            e);
+      }
+    }
+    return aliasMaps;
   }

   @Override
@@ -168,7 +218,12 @@ public String getBlockPoolId() throws IOException {
     }
   }

-  public void stop() {
-    RPC.stopProxy(rpcProxy);
+  @Override
+  public void close() throws IOException {
+    LOG.info("Stopping rpcProxy in " +
+        "InMemoryAliasMapProtocolClientSideTranslatorPB");
+    if (rpcProxy != null) {
+      RPC.stopProxy(rpcProxy);
+    }
   }
 }
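The static init() above replaces the old configuration-driven constructor: it returns one InMemoryAliasMapProtocol proxy per alias map it can reach, and wires InMemoryAliasMapFailoverProxyProvider for HA-enabled nameservices so calls fail over between NameNodes. A minimal usage sketch, assuming a hypothetical caller that prints each alias map's block pool id:

import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocolPB.InMemoryAliasMapProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol;
import org.apache.hadoop.io.IOUtils;

// Sketch only: this caller is hypothetical and not part of the commit.
public class AliasMapClientSketch {
  public static void printBlockPoolIds(Configuration conf) throws IOException {
    Collection<InMemoryAliasMapProtocol> aliasMaps =
        InMemoryAliasMapProtocolClientSideTranslatorPB.init(conf);
    try {
      for (InMemoryAliasMapProtocol aliasMap : aliasMaps) {
        // getBlockPoolId() is marked @Idempotent in this change, so it is safe
        // to retry across a NameNode failover.
        System.out.println(aliasMap.getBlockPoolId());
      }
    } finally {
      for (InMemoryAliasMapProtocol aliasMap : aliasMaps) {
        // Non-HA proxies are translator instances and implement Closeable.
        if (aliasMap instanceof Closeable) {
          IOUtils.closeStream((Closeable) aliasMap);
        }
      }
    }
  }
}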

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMapProtocol.java (+5)

@@ -21,6 +21,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.io.retry.Idempotent;

 import javax.annotation.Nonnull;
 import java.io.IOException;
@@ -69,6 +70,7 @@ public Optional<Block> getNextBlock() {
    * FileRegions and the next marker.
    * @throws IOException
    */
+  @Idempotent
   InMemoryAliasMap.IterationResult list(Optional<Block> marker)
       throws IOException;

@@ -80,6 +82,7 @@ InMemoryAliasMap.IterationResult list(Optional<Block> marker)
    * @throws IOException
    */
   @Nonnull
+  @Idempotent
   Optional<ProvidedStorageLocation> read(@Nonnull Block block)
       throws IOException;

@@ -90,6 +93,7 @@ Optional<ProvidedStorageLocation> read(@Nonnull Block block)
    * @param providedStorageLocation
    * @throws IOException
    */
+  @Idempotent
   void write(@Nonnull Block block,
       @Nonnull ProvidedStorageLocation providedStorageLocation)
       throws IOException;
@@ -99,5 +103,6 @@ void write(@Nonnull Block block,
    * @return the block pool id associated with the Namenode running
    * the in-memory alias map.
    */
+  @Idempotent
   String getBlockPoolId() throws IOException;
 }
