/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
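For context, InjectingSocketFactory is a test-only SocketFactory. A minimal sketch of the idea, assuming Hadoop's StandardSocketFactory as a base and the static portToInjectOn field used above; the real class in the Hadoop test tree differs in detail:

// Sketch only: a socket factory that simulates a connect timeout against
// whichever port is stored in portToInjectOn.
public class InjectingSocketFactory extends StandardSocketFactory {
  public static int portToInjectOn;

  @Override
  public Socket createSocket() throws IOException {
    return new Socket() {
      @Override
      public void connect(SocketAddress endpoint, int timeout) throws IOException {
        if (((InetSocketAddress) endpoint).getPort() == portToInjectOn) {
          throw new ConnectTimeoutException("Injected timeout connecting to " + endpoint);
        }
        super.connect(endpoint, timeout);
      }
    };
  }
}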
/**
* Make sure that client failover works when an active NN dies and the standby
* takes over.
*/
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
// Check that it functions even if the URL becomes canonicalized
// to include a port number.
Path withPort = new Path("hdfs://" +
HATestUtil.getLogicalHostname(cluster) + ":" +
NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
FileSystem fs2 = withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
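The heavy lifting here is in HATestUtil. For reference, a hand-rolled equivalent of the failover client configuration it produces might look like the following; the logical name and addresses are placeholder values, while the config keys are the standard HDFS HA client settings:

// Placeholder values; roughly what HATestUtil.setFailoverConfigurations
// wires up for a two-NameNode nameservice.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.nameservices", "minidfs-ns");
conf.set("dfs.ha.namenodes.minidfs-ns", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.minidfs-ns.nn1", "127.0.0.1:8020");
conf.set("dfs.namenode.rpc-address.minidfs-ns.nn2", "127.0.0.1:8021");
conf.set("dfs.client.failover.proxy.provider.minidfs-ns",
    ConfiguredFailoverProxyProvider.class.getName());
conf.set("fs.defaultFS", "hdfs://minidfs-ns");
FileSystem fs = FileSystem.get(conf);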
/**
* Test that encryption zones are properly tracked by the standby.
*/
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
final int len = 8196;
final Path dir = new Path("/enc");
final Path dirChild = new Path(dir, "child");
final Path dirFile = new Path(dir, "file");
fs.mkdir(dir, FsPermission.getDirDefault());
dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
fs.mkdir(dirChild, FsPermission.getDirDefault());
DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFeed);
String contents = DFSTestUtil.readFile(fs, dirFile);
// Failover the current standby to active.
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
Assert.assertEquals("Got unexpected ez path", dir.toString(),
dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
Assert.assertEquals("Got unexpected ez path", dir.toString(),
dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
Assert.assertEquals("File contents after failover were changed", contents,
DFSTestUtil.readFile(fs, dirFile));
}
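These encryption-zone snippets assume a test key named TEST_KEY that was registered with a key provider before the cluster started. A hedged sketch of that setup, using a file-based JavaKeyStoreProvider; the exact key-provider config constant and the DFSTestUtil.createKey helper vary across Hadoop versions, and testRootDir is a placeholder:

// Sketch under stated assumptions; not part of the original snippet.
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
    "jceks://file" + new Path(testRootDir, "test.jks").toUri());
cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
cluster.waitActive();
DFSTestUtil.createKey(TEST_KEY, cluster, conf); // register the zone key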
Project: hadoop
File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(1).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
Assert.assertEquals(2, fs.getResolvedNNAddr().length);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
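This snippet leans on fixture fields declared elsewhere in TestWebHDFSForHA. Plausible declarations, reconstructed here so the snippet stands alone; the values are illustrative:

// Illustrative fixtures assumed by the snippet above.
private static final String LOGICAL_NAME = "minidfs";
private static final URI WEBHDFS_URI = URI.create("webhdfs://" + LOGICAL_NAME);
private static final MiniDFSNNTopology topo = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME)
        .addNN(new MiniDFSNNTopology.NNConf("nn1"))
        .addNN(new MiniDFSNNTopology.NNConf("nn2")));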
/**
* Same test as above, but for FileContext.
*/
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
NameService spyNS = spyOnNameService();
String logicalHost = fs.getUri().getHost();
Configuration haClientConf = fs.getConf();
FileContext fc = FileContext.getFileContext(haClientConf);
Path root = new Path("/");
fc.listStatus(root);
fc.listStatus(fc.makeQualified(root));
fc.getDefaultFileSystem().getCanonicalServiceName();
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/**
* Test to verify legacy proxy providers are correctly wrapped.
*/
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
// setup the config with the dummy provider class
Configuration config = new HdfsConfiguration(conf);
String logicalName = HATestUtil.getLogicalHostname(cluster);
HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
DummyLegacyFailoverProxyProvider.class.getName());
Path p = new Path("hdfs://" + logicalName + "/");
// not to use IP address for token service
SecurityUtil.setTokenServiceUseIp(false);
// Logical URI should be used.
assertTrue("Legacy proxy providers should use logical URI.",HAUtil.useLogicalUri(config,p.toUri()));
}
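DummyLegacyFailoverProxyProvider stands in for a pre-wrapping-era provider. A hedged sketch of the general shape such a class takes against org.apache.hadoop.io.retry.FailoverProxyProvider; the real test class and the interface generics differ across Hadoop versions:

// Sketch only: the legacy-style (Configuration, URI, Class) constructor is
// the part the wrapping logic cares about; the proxy handling is stubbed.
public class DummyLegacyFailoverProxyProvider<T> implements FailoverProxyProvider<T> {
  private final Class<T> xface;

  public DummyLegacyFailoverProxyProvider(Configuration conf, URI uri, Class<T> xface) {
    this.xface = xface;
  }

  @Override public Class<T> getInterface() { return xface; }
  @Override public ProxyInfo<T> getProxy() { return new ProxyInfo<T>(null, null); }
  @Override public void performFailover(T currentProxy) { /* no-op for the dummy */ }
  @Override public void close() throws IOException { }
}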
Project: hadoop
File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration())
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
try {
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
Assert.assertEquals(1, movePaths.size());
URI nn = namenodes.iterator().next();
Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
Assert.assertTrue(movePaths.containsKey(nn));
checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
} finally {
cluster.shutdown();
}
}
Project: hadoop
File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
.build();
cluster.waitActive();
cluster.transitionToActive(namenodeId);
HATestUtil.setFailoverConfigurations(cluster, conf);
filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
namesystem = cluster.getNamesystem(namenodeId);
metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: aliyun-oss-hadoop-fs
File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(1).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
Assert.assertEquals(2, fs.getResolvedNNAddr().length);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Make sure that client failover works when an active NN dies and the standby
* takes over.
*/
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
// Check that it functions even if the URL becomes canonicalized
// to include a port number.
Path withPort = new Path("hdfs://" +
HATestUtil.getLogicalHostname(cluster) + ":" +
HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/" +
TEST_FILE.toUri().getPath());
FileSystem fs2 = withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
/**
* Same test as above, but for FileContext.
*/
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
NameService spyNS = spyOnNameService();
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
String logicalHost = fs.getUri().getHost();
Configuration haClientConf = fs.getConf();
FileContext fc = FileContext.getFileContext(haClientConf);
Path root = new Path("/");
fc.listStatus(root);
fc.listStatus(fc.makeQualified(root));
fc.getDefaultFileSystem().getCanonicalServiceName();
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/**
* Test to verify legacy proxy providers are correctly wrapped.
*/
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
// setup the config with the dummy provider class
Configuration config = new HdfsConfiguration(conf);
String logicalName = HATestUtil.getLogicalHostname(cluster);
HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
config.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
DummyLegacyFailoverProxyProvider.class.getName());
Path p = new Path("hdfs://" + logicalName + "/");
// not to use IP address for token service
SecurityUtil.setTokenServiceUseIp(false);
// Logical URI should be used.
assertTrue("Legacy proxy providers should use logical URI.",
HAUtil.useLogicalUri(config, p.toUri()));
}
Project: aliyun-oss-hadoop-fs
File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration())
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
try {
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
Assert.assertEquals(1, movePaths.size());
URI nn = namenodes.iterator().next();
Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
Assert.assertTrue(movePaths.containsKey(nn));
checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
} finally {
cluster.shutdown();
}
}
Project: aliyun-oss-hadoop-fs
File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
.build();
cluster.waitActive();
cluster.transitionToActive(namenodeId);
HATestUtil.setFailoverConfigurations(cluster, conf);
filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
namesystem = cluster.getNamesystem(namenodeId);
metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: aliyun-oss-hadoop-fs
File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf, int numNNs) {
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, journalURI.toString());
List<String> nns = new ArrayList<String>(numNNs);
int port = basePort;
for (int i = 0; i < numNNs; i++) {
nns.add("127.0.0.1:" + port);
// increment by 2 each time to account for the http port in the config setting
port += 2;
}
// use standard failover configurations
HATestUtil.setFailoverConfigurations(conf, NAMESERVICE, nns);
return conf;
}
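A quick illustration of the resulting port layout, assuming an illustrative basePort of 12000 and two NameNodes:

// basePort = 12000, numNNs = 2  =>  nns = ["127.0.0.1:12000", "127.0.0.1:12002"]
// RPC takes the even ports; 12001 and 12003 remain free for the matching
// NameNode HTTP servers, which is why the loop steps by 2.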
Project: aliyun-oss-hadoop-fs
File: TestBookKeeperHACheckpoints.java
@SuppressWarnings("rawtypes")
@Override
@Before
public void setupCluster() throws Exception {
Configuration conf = setupCommonConfig();
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
.toString());
BKJMUtil.addJournalManagerDefinition(conf);
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
.numDataNodes(1)
.manageNameDfsSharedDirs(false)
.build();
cluster.waitActive();
setNNs();
fs = HATestUtil.configureFailoverFs(cluster, conf);
cluster.transitionToActive(0);
}
Project: big-c
File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(1).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
Assert.assertEquals(2, fs.getResolvedNNAddr().length);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Make sure that client failover works when an active NN dies and the standby
* takes over.
*/
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
// Check that it functions even if the URL becomes canonicalized
// to include a port number.
Path withPort = new Path("hdfs://" +
HATestUtil.getLogicalHostname(cluster) + ":" +
NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
FileSystem fs2 = withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
/**
* Same test as above, but for FileContext.
*/
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
NameService spyNS = spyOnNameService();
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
String logicalHost = fs.getUri().getHost();
Configuration haClientConf = fs.getConf();
FileContext fc = FileContext.getFileContext(haClientConf);
Path root = new Path("/");
fc.listStatus(root);
fc.listStatus(fc.makeQualified(root));
fc.getDefaultFileSystem().getCanonicalServiceName();
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/**
* Test to verify legacy proxy providers are correctly wrapped.
*/
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
// setup the config with the dummy provider class
Configuration config = new HdfsConfiguration(conf);
String logicalName = HATestUtil.getLogicalHostname(cluster);
HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
DummyLegacyFailoverProxyProvider.class.getName());
Path p = new Path("hdfs://" + logicalName + "/");
// not to use IP address for token service
SecurityUtil.setTokenServiceUseIp(false);
// Logical URI should be used.
assertTrue("Legacy proxy providers should use logical URI.",
HAUtil.useLogicalUri(config, p.toUri()));
}
Project: big-c
File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration())
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
try {
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
Assert.assertEquals(1, movePaths.size());
URI nn = namenodes.iterator().next();
Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
Assert.assertTrue(movePaths.containsKey(nn));
checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
} finally {
cluster.shutdown();
}
}
Project: big-c
File: TestNameNodeRetryCacheMetrics.java
Project: hadoop-2.6.0-cdh5.4.3
File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(1).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
Assert.assertEquals(2, fs.getResolvedNNAddr().length);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Make sure that client failover works when an active NN dies and the standby
* takes over.
*/
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
// Check that it functions even if the URL becomes canonicalized
// to include a port number.
Path withPort = new Path("hdfs://" +
HATestUtil.getLogicalHostname(cluster) + ":" +
NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
FileSystem fs2 = withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
/**
* Same test as above, but for FileContext.
*/
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
NameService spyNS = spyOnNameService();
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
String logicalHost = fs.getUri().getHost();
Configuration haClientConf = fs.getConf();
FileContext fc = FileContext.getFileContext(haClientConf);
Path root = new Path("/");
fc.listStatus(root);
fc.listStatus(fc.makeQualified(root));
fc.getDefaultFileSystem().getCanonicalServiceName();
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/**
* Test to verify legacy proxy providers are correctly wrapped.
*/
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
// setup the config with the dummy provider class
Configuration config = new HdfsConfiguration(conf);
String logicalName = HATestUtil.getLogicalHostname(cluster);
HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
DummyLegacyFailoverProxyProvider.class.getName());
Path p = new Path("hdfs://" + logicalName + "/");
// not to use IP address for token service
SecurityUtil.setTokenServiceUseIp(false);
// Logical URI should be used.
assertTrue("Legacy proxy providers should use logical URI.",
HAUtil.useLogicalUri(config, p.toUri()));
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration())
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
try {
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
Assert.assertEquals(1, movePaths.size());
URI nn = namenodes.iterator().next();
Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
Assert.assertTrue(movePaths.containsKey(nn));
checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
} finally {
cluster.shutdown();
}
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestHarFileSystemWithHA.java
/**
* Test that the HarFileSystem works with underlying HDFS URIs that have no
* port specified, as is often the case with an HA setup.
*/
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.build();
cluster.transitionToActive(0);
HATestUtil.setFailoverConfigurations(cluster, conf);
createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf), TEST_HAR_PATH);
URI failoverUri = FileSystem.getDefaultUri(conf);
Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
p.getFileSystem(conf);
} finally {
cluster.shutdown();
}
}
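The har:// authority packs the underlying filesystem's scheme and authority into a single token ("hdfs-" + authority), which is why a port-free logical HA URI carries straight through. An illustrative path, assuming a nameservice named minidfs-ns and a hypothetical archive location:

// Illustrative only: file "datafile" inside /user/test/foo.har over an HA URI.
Path inHar = new Path("har://hdfs-minidfs-ns/user/test/foo.har/datafile");
FileSystem harFs = inHar.getFileSystem(conf); // resolves through the HA client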
/**
* Make sure that client failover works when an active NN dies and the standby
* takes over.
*/
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
// Check that it functions even if the URL becomes canonicalized
// to include a port number.
Path withPort = new Path("hdfs://" +
HATestUtil.getLogicalHostname(cluster) + ":" +
NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
FileSystem fs2 = withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
/**
* Test that the client doesn't ever try to DNS-resolve the logical URI.
* Regression test for HADOOP-9150.
*/
@Test
public void testDoesntDnsResolveLogicalURI() throws Exception {
NameService spyNS = spyOnNameService();
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
String logicalHost = fs.getUri().getHost();
Path qualifiedRoot = fs.makeQualified(new Path("/"));
// Make a few calls against the filesystem.
fs.getCanonicalServiceName();
fs.listStatus(qualifiedRoot);
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/**
* Same test as above, but for FileContext.
*/
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
NameService spyNS = spyOnNameService();
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
String logicalHost = fs.getUri().getHost();
Configuration haClientConf = fs.getConf();
FileContext fc = FileContext.getFileContext(haClientConf);
Path root = new Path("/");
fc.listStatus(root);
fc.listStatus(fc.makeQualified(root));
fc.getDefaultFileSystem().getCanonicalServiceName();
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}