Project: aliyun-oss-hadoop-fs
File: ErasureCodingWorker.java
private Callable<Void> readFromBlock(final BlockReader reader,final ByteBuffer buf) {
return new Callable<Void>() {
@Override
public Void call() throws Exception {
try {
actualReadFromBlock(reader,buf);
return null;
} catch (IOException e) {
LOG.info(e.getMessage());
throw e;
}
}
};
}
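For context, a minimal sketch of how a callable like this is typically driven from a striped-read loop; the executor and completion-service names are illustrative assumptions, not part of the source above:
import java.util.concurrent.*;

ExecutorService readPool = Executors.newFixedThreadPool(4);
CompletionService<Void> readService = new ExecutorCompletionService<>(readPool);
// submit one read task per source block reader
readService.submit(readFromBlock(reader, buf));
// take() blocks until some read task completes or fails
Future<Void> done = readService.take();
done.get(); // rethrows any IOException wrapped in an ExecutionException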
Project: aliyun-oss-hadoop-fs
File: ErasureCodingWorker.java
private BlockReader newBlockReader(final ExtendedBlock block,long offsetInBlock,DatanodeInfo dnInfo) {
if (offsetInBlock >= block.getNumBytes()) {
return null;
}
try {
InetSocketAddress dnAddr = getSocketAddress4Transfer(dnInfo);
Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(
block,EnumSet.of(BlockTokenIdentifier.AccessMode.READ));
/*
* This could be further improved: if the replica is local, we could
* read directly from the DN after checking that the replica is in the
* FINALIZED state. Note that we should not use short-circuit local
* reads here, since they require a domain-socket config on UNIX or a
* legacy config on Windows.
*
* TODO: add a proper tracer
*/
return RemoteBlockReader2.newBlockReader(
"dummy",block,blockToken,offsetInBlock,
block.getNumBytes() - offsetInBlock,true,"",
newConnectedPeer(block,dnAddr,dnInfo),dnInfo,null,
cachingStrategy,datanode.getTracer());
} catch (IOException e) {
return null;
}
}
Project: indexr
File: DFSByteBufferReader.java
private void tryGetLocalFile() {
if (tryGetLocalFileTimes >= TRY_GET_LOCAL_FILE_LIMIT) {
return;
}
if (isSingleBlock && HDFS_READ_HACK_ENABLE) {
try {
InputStream is = input.getWrappedStream();
if (is instanceof DFSInputStream) {
BlockReader blockReader = MemoryUtil.getDFSInputStream_blockReader(is);
if (blockReader != null && blockReader.isShortCircuit()) {
localFile = MemoryUtil.getBlockReaderLocal_dataIn(blockReader);
}
}
} catch (Throwable e) {
logger.debug("HDFS READ HACK Failed.",e);
}
}
tryGetLocalFileTimes++;
}
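MemoryUtil above is indexr's internal reflection helper; a minimal sketch of the same idea with plain java.lang.reflect is shown below. The field names are assumptions about DFSInputStream/BlockReaderLocal internals and may differ across Hadoop versions:
import java.lang.reflect.Field;

static Object readPrivateField(Object target,Class<?> declaringClass,String fieldName)
throws ReflectiveOperationException {
Field f = declaringClass.getDeclaredField(fieldName);
f.setAccessible(true); // bypass private access, as the hack requires
return f.get(target);
}

// hypothetical usage mirroring MemoryUtil.getDFSInputStream_blockReader(is):
// Object blockReader = readPrivateField(is,DFSInputStream.class,"blockReader");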
Project: hadoop-EAR
File: DFSInputStream.java
/**
* Close the given BlockReader and cache its socket.
*/
private void closeBlockReader(BlockReader reader,boolean reuseConnection)
throws IOException {
if (reader.hasSentStatusCode()) {
Socket oldSock = reader.takeSocket();
if (dfsClient.getDataTransferProtocolVersion() <
DataTransferProtocol.READ_REUSE_CONNECTION_VERSION ||
!reuseConnection) {
// close the sock for old datanode.
if (oldSock != null) {
IOUtils.closeSocket(oldSock);
}
} else {
socketCache.put(oldSock);
}
}
reader.close();
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
InetSocketAddress targetAddr = null;
Socket s = null;
ExtendedBlock block = lblock.getBlock();
targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr,HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
String file = BlockReaderFactory.getFileName(targetAddr,"test-blockpoolid",block.getBlockId());
// argument list reconstructed from context; the exact signature differs per branch
BlockReader blockReader =
BlockReaderFactory.newBlockReader(new DFSClient.Conf(conf),file,block,
lblock.getBlockToken(),0,-1,true,"TestDatanodeVolumeFailure",
TcpPeerServer.peerFromSocket(s),datanode,null,null,null,false);
blockReader.close();
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
InetSocketAddress targetAddr = null;
Socket s = null;
BlockReader blockReader = null;
Block block = lblock.getBlock();
targetAddr = NetUtils.createSocketAddr(datanode.getName());
s = new Socket();
s.connect(targetAddr,HdfsConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
String file = BlockReader.getFileName(targetAddr,block.getBlockId());
// argument list reconstructed from context; the exact signature differs per branch
blockReader =
BlockReader.newBlockReader(s,file,block.getBlockId(),
lblock.getBlockToken(),block.getGenerationStamp(),0,-1,
conf.getInt("io.file.buffer.size",4096));
// nothing more to do - if access fails, newBlockReader throws an exception
}
Project: aliyun-oss-hadoop-fs
File: ErasureCodingWorker.java
private void initChecksumAndBufferSizeIfNeeded(BlockReader blockReader) {
if (checksum == null) {
checksum = blockReader.getDataChecksum();
bytesPerChecksum = checksum.getBytesPerChecksum();
// Round the read buffer size down to a multiple of bytesPerChecksum
int readBufferSize = STRIPED_READ_BUFFER_SIZE;
bufferSize = readBufferSize < bytesPerChecksum ? bytesPerChecksum :
readBufferSize - readBufferSize % bytesPerChecksum;
} else {
assert blockReader.getDataChecksum().equals(checksum);
}
}
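A quick worked example of the rounding above, with illustrative values that are not from the source: a 512-byte checksum chunk and a 1000-byte configured read buffer yield a 512-byte striped-read buffer, so reads stay aligned to checksum-chunk boundaries.
int bytesPerChecksum = 512; // illustrative value
int readBufferSize = 1000;  // illustrative STRIPED_READ_BUFFER_SIZE
int bufferSize = readBufferSize < bytesPerChecksum ? bytesPerChecksum
: readBufferSize - readBufferSize % bytesPerChecksum; // 1000 - 1000 % 512 = 512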
Project: aliyun-oss-hadoop-fs
File: ErasureCodingWorker.java
/**
* Read bytes from the block until the buffer is full or the stream
* is exhausted.
*/
private void actualReadFromBlock(BlockReader reader,ByteBuffer buf)
throws IOException {
int len = buf.remaining();
int n = 0;
while (n < len) {
int nread = reader.read(buf);
if (nread <= 0) {
break;
}
n += nread;
}
}
Project: aliyun-oss-hadoop-fs
File: ErasureCodingWorker.java
private void closeBlockReader(BlockReader blockReader) {
try {
if (blockReader != null) {
blockReader.close();
}
} catch (IOException e) {
// ignore
}
}
Project: hadoop-EAR
File: DFSInputStream.java
protected BlockReader getBlockReader(int protocolVersion,int namespaceId,
InetSocketAddress dnAddr,String file,long blockId,long generationStamp,
long startOffset,long len,int bufferSize,boolean verifyChecksum,
String clientName,long bytestocheckReadSpeed,long minReadSpeedBps,
boolean reuseConnection,FSClientReadProfilingData cliData)
throws IOException {
// delegate to the full overload; "options" is presumably a field of the enclosing class
return getBlockReader(protocolVersion,namespaceId,dnAddr,file,blockId,
generationStamp,startOffset,len,bufferSize,verifyChecksum,clientName,
bytestocheckReadSpeed,minReadSpeedBps,reuseConnection,cliData,options);
}
Project: cumulus
File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr,long blockId,
Token<BlockTokenIdentifier> blockToken,long genStamp,long blockSize,
long offsetIntoBlock,long chunkSizetoView,JspWriter out,
Configuration conf) throws IOException {
if (chunkSizetoView == 0) return;
Socket s = new Socket();
s.connect(addr,HdfsConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
long amtToRead = Math.min(chunkSizetoView,blockSize - offsetIntoBlock);
// Use the block name for file name.
String file = BlockReader.getFileName(addr,blockId);
// argument list reconstructed from context; the exact signature differs per branch
BlockReader blockReader = BlockReader.newBlockReader(s,file,
new Block(blockId,genStamp),blockToken,offsetIntoBlock,amtToRead,
conf.getInt("io.file.buffer.size",4096));
byte[] buf = new byte[(int)amtToRead];
int readOffset = 0;
int retries = 2;
while ( amtToRead > 0 ) {
int numRead;
try {
numRead = blockReader.readAll(buf,readOffset,(int)amtToRead);
}
catch (IOException e) {
retries--;
if (retries == 0)
throw new IOException("Could not read data from datanode");
continue;
}
amtToRead -= numRead;
readOffset += numRead;
}
blockReader = null;
s.close();
out.print(HtmlQuoting.quoteHtmlChars(new String(buf)));
}
/**
* Test that we don't call verifiedByClient() when the client only
* reads a partial block.
*/
@Test
public void testCompletePartialRead() throws Exception {
// Ask for half the file
BlockReader reader = util.getBlockReader(testBlock,FILE_SIZE_K * 1024 / 2);
DataNode dn = util.getDataNode(testBlock);
DataBlockScanner scanner = spy(dn.blockScanner);
dn.blockScanner = scanner;
// And read half the file
util.readAndCheckEOS(reader,FILE_SIZE_K * 1024 / 2,true);
verify(scanner,never()).verifiedByClient(Mockito.isA(Block.class));
reader.close();
}
Project: hadoop
File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf,LocatedBlock lblock,boolean shouldSucceed) {
InetSocketAddress targetAddr = null;
IOException ioe = null;
BlockReader blockReader = null;
ExtendedBlock block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",block.getBlockId())).
setBlock(block).
setBlockToken(lblock.getBlockToken()).
setInetSocketAddress(targetAddr).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestBlockTokenWithDFS").
setDatanodeInfo(nodes[0]).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken,DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr,HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
} catch (IOException ex) {
ioe = ex;
} finally {
if (blockReader != null) {
try {
blockReader.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
if (shouldSucceed) {
Assert.assertNotNull("OP_READ_BLOCK: access token is invalid,"
+ "when it is expected to be valid",blockReader);
} else {
Assert.assertNotNull("OP_READ_BLOCK: access token is valid,"
+ "when it is expected to be invalid",ioe);
Assert.assertTrue(
"OP_READ_BLOCK Failed due to reasons other than access token: ",ioe instanceof InvalidBlockTokenException);
}
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
InetSocketAddress targetAddr = null;
ExtendedBlock block = lblock.getBlock();
targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
setInetSocketAddress(targetAddr).
setBlock(block).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",block.getBlockId())).
setBlockToken(lblock.getBlockToken()).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestDatanodeVolumeFailure").
setDatanodeInfo(datanode).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken,DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr,HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
blockReader.close();
}
Project: aliyun-oss-hadoop-fs
File: TestBlockTokenWithDFS.java
protected void tryRead(final Configuration conf,LocatedBlock lblock,
boolean shouldSucceed) {
InetSocketAddress targetAddr = null;
IOException ioe = null;
BlockReader blockReader = null;
ExtendedBlock block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",block.getBlockId())).
setBlock(block).
setBlockToken(lblock.getBlockToken()).
setInetSocketAddress(targetAddr).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestBlockTokenWithDFS").
setDatanodeInfo(nodes[0]).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setTracer(FsTracer.get(conf)).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken,DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr,HdfsConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
peer = DFSUtilClient.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
} catch (IOException ex) {
ioe = ex;
} finally {
if (blockReader != null) {
try {
blockReader.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
if (shouldSucceed) {
Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
+ "when it is expected to be valid",blockReader);
} else {
Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
+ "when it is expected to be invalid",ioe);
Assert.assertTrue(
"OP_READ_BLOCK Failed due to reasons other than access token: ",ioe instanceof InvalidBlockTokenException);
}
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
InetSocketAddress targetAddr = null;
ExtendedBlock block = lblock.getBlock();
targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
setInetSocketAddress(targetAddr).
setBlock(block).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",block.getBlockId())).
setBlockToken(lblock.getBlockToken()).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestDatanodeVolumeFailure").
setDatanodeInfo(datanode).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setTracer(FsTracer.get(conf)).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken,DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr,HdfsConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
peer = DFSUtilClient.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
blockReader.close();
}
Project: big-c
File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf,LocatedBlock lblock,boolean shouldSucceed) {
// body identical to the hadoop version of tryRead shown above
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
// body identical to the hadoop version of accessBlock shown above
}
Project: hadoop-2.6.0-cdh5.4.3
File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf,LocatedBlock lblock,boolean shouldSucceed) {
// body identical to the hadoop version of tryRead shown above
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
// body identical to the hadoop version of accessBlock shown above
}
/**
* Test that the client marks a datanode dead and switches away from it
* when reads from that datanode are too slow.
*
* @throws NoSuchFieldException
* @throws SecurityException
* @throws IllegalAccessException
* @throws IllegalArgumentException
*/
public void testSlowDn() throws IOException,SecurityException,NoSuchFieldException,IllegalArgumentException,IllegalAccessException {
Configuration conf = new Configuration();
conf.setLong("dfs.bytes.to.check.read.speed",128 * 1024);
conf.setLong("dfs.min.read.speed.bps",1024 * 200);
conf.setBoolean("dfs.read.switch.for.slow",true);
MiniDFSCluster cluster = new MiniDFSCluster(conf,2,true,null);
FileSystem fs = cluster.getFileSystem();
FSDataInputStream in = null;
try {
// create a new file,write to it and close it.
//
Path file1 = new Path("/filestatus.dat");
FSDataOutputStream stm = createFile(fs,file1,2);
writeFile(stm);
stm.close();
in = fs.open(file1);
in.readByte();
DFSInputStream dfsClientIn = findDFSClientInputStream(in);
Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
blockReaderField.setAccessible(true);
BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);
blockReader.setArtificialSlowdown(1000);
blockReader.isReadLocal = false;
blockReader.isReadRackLocal = false;
for (int i = 0; i < 1024; i++) {
in.readByte();
}
blockReader.setArtificialSlowdown(0);
for (int i = 1024; i < fileSize - 1; i++) {
in.readByte();
}
ConcurrentHashMap<DatanodeInfo,DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
TestCase.assertEquals(1,deadNodes.size());
} finally {
if (in != null) {
in.close();
}
fs.close();
cluster.shutdown();
}
}
Project: hadoop-EAR
File: JspHelper.java
public void streamBlockInAscii(InetSocketAddress addr,long blockId,
long genStamp,long blockSize,long offsetIntoBlock,long chunkSizetoView,
JspWriter out,Configuration conf)
throws IOException {
if (chunkSizetoView == 0) return;
Socket s = new Socket();
s.connect(addr,HdfsConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
long amtToRead = Math.min(chunkSizetoView,blockSize - offsetIntoBlock);
// Use the block name for file name.
// argument list reconstructed from context; the exact signature differs per branch
BlockReader blockReader =
BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
s,addr.toString() + ":" + blockId,blockId,genStamp,offsetIntoBlock,
amtToRead,conf.getInt("io.file.buffer.size",4096));
byte[] buf = new byte[(int)amtToRead];
int readOffset = 0;
int retries = 2;
while ( amtToRead > 0 ) {
int numRead;
try {
numRead = blockReader.readAll(buf,readOffset,(int)amtToRead);
}
catch (IOException e) {
retries--;
if (retries == 0)
throw new IOException("Could not read data from datanode");
continue;
}
amtToRead -= numRead;
readOffset += numRead;
}
blockReader = null;
s.close();
out.print(new String(buf));
}
Project: hadoop-plus
File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr,String poolId,
long blockId,Token<BlockTokenIdentifier> blockToken,long genStamp,
long blockSize,long offsetIntoBlock,long chunkSizetoView,JspWriter out,
Configuration conf,DFSClient.Conf dfsConf,DataEncryptionKey encryptionKey)
throws IOException {
if (chunkSizetoView == 0) return;
Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(addr,HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
int amtToRead = (int)Math.min(chunkSizetoView,blockSize - offsetIntoBlock);
// Use the block name for file name.
String file = BlockReaderFactory.getFileName(addr,poolId,blockId);
// argument list reconstructed from context; the exact signature differs per branch
BlockReader blockReader = BlockReaderFactory.newBlockReader(dfsConf,file,
new ExtendedBlock(poolId,blockId,0,genStamp),blockToken,
offsetIntoBlock,amtToRead,true,"JspHelper",
TcpPeerServer.peerFromSocketAndKey(s,encryptionKey),
new DatanodeID(addr.getAddress().getHostAddress(),addr.getHostName(),
addr.getPort(),0),null,null,null,false);
final byte[] buf = new byte[amtToRead];
int readOffset = 0;
int retries = 2;
while ( amtToRead > 0 ) {
int numRead = amtToRead;
try {
blockReader.readFully(buf,readOffset,amtToRead);
}
catch (IOException e) {
retries--;
if (retries == 0)
throw new IOException("Could not read data from datanode");
continue;
}
amtToRead -= numRead;
readOffset += numRead;
}
blockReader.close();
out.print(HtmlQuoting.quoteHtmlChars(new String(buf,Charsets.UTF_8)));
}
Project: hadoop-plus
File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf,LocatedBlock lblock,
boolean shouldSucceed) {
InetSocketAddress targetAddr = null;
Socket s = null;
BlockReader blockReader = null;
ExtendedBlock block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr,HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
String file = BlockReaderFactory.getFileName(targetAddr,block.getBlockId());
// argument list reconstructed from context; the exact signature differs per branch
blockReader = BlockReaderFactory.newBlockReader(
new DFSClient.Conf(conf),file,block,lblock.getBlockToken(),0,-1,
true,"TestBlockTokenWithDFS",TcpPeerServer.peerFromSocket(s),
nodes[0],null,null,null,false);
} catch (IOException ex) {
if (ex instanceof InvalidBlockTokenException) {
assertFalse("OP_READ_BLOCK: access token is invalid,shouldSucceed);
return;
}
fail("OP_READ_BLOCK Failed due to reasons other than access token: "
+ StringUtils.stringifyException(ex));
} finally {
if (s != null) {
try {
s.close();
} catch (IOException iex) {
} finally {
s = null;
}
}
}
if (blockReader == null) {
fail("OP_READ_BLOCK Failed due to reasons other than access token");
}
assertTrue("OP_READ_BLOCK: access token is valid,"
+ "when it is expected to be invalid",shouldSucceed);
}
Project: FlexMap
File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf,LocatedBlock lblock,boolean shouldSucceed) {
// body identical to the hadoop version of tryRead shown above
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
// body identical to the hadoop version of accessBlock shown above
}
Project: hops
File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr,String poolId,
long blockId,Token<BlockTokenIdentifier> blockToken,long genStamp,
long blockSize,long offsetIntoBlock,long chunkSizetoView,JspWriter out,
Configuration conf,DataEncryptionKey encryptionKey) throws IOException {
if (chunkSizetoView == 0) {
return;
}
Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(addr,HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
int amtToRead =
(int) Math.min(chunkSizetoView,blockSize - offsetIntoBlock);
// Use the block name for file name.
String file = BlockReaderFactory.getFileName(addr,poolId,blockId);
// argument list reconstructed from context; the exact signature differs per branch
BlockReader blockReader = BlockReaderFactory.newBlockReader(conf,s,file,
new ExtendedBlock(poolId,blockId,0,genStamp),blockToken,
offsetIntoBlock,amtToRead,encryptionKey);
byte[] buf = new byte[(int) amtToRead];
int readOffset = 0;
int retries = 2;
while (amtToRead > 0) {
int numRead = amtToRead;
try {
blockReader.readFully(buf,readOffset,amtToRead);
} catch (IOException e) {
retries--;
if (retries == 0) {
throw new IOException("Could not read data from datanode");
}
continue;
}
amtToRead -= numRead;
readOffset += numRead;
}
blockReader = null;
s.close();
out.print(HtmlQuoting.quoteHtmlChars(new String(buf,Charsets.UTF_8)));
}
Project: hops
File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf,LocatedBlock lblock,
boolean shouldSucceed) {
InetSocketAddress targetAddr = null;
Socket s = null;
BlockReader blockReader = null;
ExtendedBlock block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr,HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
String file = BlockReaderFactory
.getFileName(targetAddr,"test-blockpoolid",block.getBlockId());
// argument list reconstructed from context; the exact signature differs per branch
blockReader = BlockReaderFactory
.newBlockReader(conf,s,file,block,lblock.getBlockToken(),0,-1,null);
} catch (IOException ex) {
if (ex instanceof InvalidBlockTokenException) {
assertFalse("OP_READ_BLOCK: access token is invalid," +
"when it is expected to be valid",shouldSucceed);
return;
}
fail("OP_READ_BLOCK Failed due to reasons other than access token: " +
StringUtils.stringifyException(ex));
} finally {
if (s != null) {
try {
s.close();
} catch (IOException iex) {
} finally {
s = null;
}
}
}
if (blockReader == null) {
fail("OP_READ_BLOCK Failed due to reasons other than access token");
}
assertTrue("OP_READ_BLOCK: access token is valid," +
"when it is expected to be invalid",shouldSucceed);
}
Project: hadoop-TCP
File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr,String poolId,
long blockId,Token<BlockTokenIdentifier> blockToken,long genStamp,
long blockSize,long offsetIntoBlock,long chunkSizetoView,JspWriter out,
Configuration conf,DFSClient.Conf dfsConf,DataEncryptionKey encryptionKey)
throws IOException {
// body identical to the hadoop-plus version of streamBlockInAscii shown above,
// except that this branch also passes CachingStrategy.newDefaultStrategy()
// to newBlockReader
}
Project: hadoop-TCP
File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf,LocatedBlock lblock,boolean shouldSucceed) {
// body identical to the hadoop-plus version of tryRead shown above,
// except that this branch also passes CachingStrategy.newDefaultStrategy()
// to newBlockReader
}
Project: hardfs
File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr,String poolId,
long blockId,Token<BlockTokenIdentifier> blockToken,long genStamp,
long blockSize,long offsetIntoBlock,long chunkSizetoView,JspWriter out,
Configuration conf,DFSClient.Conf dfsConf,DataEncryptionKey encryptionKey)
throws IOException {
// body identical to the hadoop-plus version of streamBlockInAscii shown above
}
Project: hardfs
File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf,LocatedBlock lblock,boolean shouldSucceed) {
// body identical to the hadoop-plus version of tryRead shown above
}
Project: hadoop-on-lustre2
File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf,LocatedBlock lblock,
boolean shouldSucceed) {
InetSocketAddress targetAddr = null;
IOException ioe = null;
BlockReader blockReader = null;
ExtendedBlock block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",block.getBlockId())).
setBlock(block).
setBlockToken(lblock.getBlockToken()).
setInetSocketAddress(targetAddr).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestBlockTokenWithDFS").
setDatanodeInfo(nodes[0]).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr,HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
} catch (IOException ex) {
ioe = ex;
} finally {
if (blockReader != null) {
try {
blockReader.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
if (shouldSucceed) {
Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
+ "when it is expected to be valid",blockReader);
} else {
Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
+ "when it is expected to be invalid",ioe);
Assert.assertTrue(
"OP_READ_BLOCK Failed due to reasons other than access token: ",ioe instanceof InvalidBlockTokenException);
}
}
/**
* try to access a block on a data node. If fails - throws exception
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode,LocatedBlock lblock)
throws IOException {
InetSocketAddress targetAddr = null;
ExtendedBlock block = lblock.getBlock();
targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
setInetSocketAddress(targetAddr).
setBlock(block).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid",block.getBlockId())).
setBlockToken(lblock.getBlockToken()).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestDatanodeVolumeFailure").
setDatanodeInfo(datanode).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr,HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
blockReader.close();
}
Project: cumulus
File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf,LocatedBlock lblock,
boolean shouldSucceed) {
InetSocketAddress targetAddr = null;
Socket s = null;
BlockReader blockReader = null;
Block block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
s = new Socket();
s.connect(targetAddr,HdfsConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
String file = BlockReader.getFileName(targetAddr,block.getBlockId());
// argument list reconstructed from context; the exact signature differs per branch
blockReader = BlockReader.newBlockReader(s,file,block.getBlockId(),
lblock.getBlockToken(),block.getGenerationStamp(),0,-1,
conf.getInt("io.file.buffer.size",4096));
} catch (IOException ex) {
if (ex instanceof InvalidBlockTokenException) {
assertFalse("OP_READ_BLOCK: access token is invalid,shouldSucceed);
return;
}
fail("OP_READ_BLOCK Failed due to reasons other than access token");
} finally {
if (s != null) {
try {
s.close();
} catch (IOException iex) {
} finally {
s = null;
}
}
}
if (blockReader == null) {
fail("OP_READ_BLOCK Failed due to reasons other than access token");
}
assertTrue("OP_READ_BLOCK: access token is valid,shouldSucceed);
}
/**
* Test that the client marks a datanode dead and switches away from it
* when reads from that datanode are too slow.
*
* @throws NoSuchFieldException
* @throws SecurityException
* @throws IllegalAccessException
* @throws IllegalArgumentException
*/
public void testSlowDn() throws IOException,SecurityException,NoSuchFieldException,IllegalArgumentException,IllegalAccessException {
Configuration conf = new Configuration();
conf.setLong("dfs.min.read.speed.bps",1024 * 200);
MiniDFSCluster cluster = new MiniDFSCluster(conf,2,true,null);
FileSystem fs = cluster.getFileSystem();
FSDataInputStream in = null;
try {
// create a new file,write to it and close it.
Path file1 = new Path("/filestatus.dat");
FSDataOutputStream stm = createFile(fs,file1,2);
writeFile(stm);
stm.close();
in = fs.open(file1);
in.readByte();
DFSInputStream dfsClientIn = findDFSClientInputStream(in);
Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
blockReaderField.setAccessible(true);
BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);
blockReader.setArtificialSlowdown(1000);
blockReader.isReadLocal = false;
blockReader.isReadRackLocal = false;
blockReader.ENABLE_THROW_FOR_SLOW = true;
for (int i = 0; i < 1024; i++) {
in.readByte();
}
blockReader.setArtificialSlowdown(0);
for (int i = 1024; i < fileSize - 1; i++) {
in.readByte();
}
ConcurrentHashMap<DatanodeInfo,DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
TestCase.assertEquals(1,deadNodes.size());
} finally {
if (in != null) {
in.close();
}
fs.close();
cluster.shutdown();
}
}
Project: RDFS
File: JspHelper.java
public void streamBlockInAscii(InetSocketAddress addr,long blockId,
long genStamp,long blockSize,long offsetIntoBlock,long chunkSizetoView,
JspWriter out,Configuration conf) throws IOException {
// body identical to the hadoop-EAR version of streamBlockInAscii shown above
}
Project: aliyun-oss-hadoop-fs
File: ErasureCodingWorker.java
/**
* StripedReader is used to read from one source DN. It contains a block
* reader,a buffer and the striped block index.
* Only allocate one StripedReader per source,and keep the StripedReaders
* in the same array order as the sources. Typically we only need to
* allocate the minimum number (minRequiredSources) of StripedReaders,and
* allocate a new one for a new source DN when an existing DN becomes
* invalid or slow.
* If a source DN is corrupt,set the corresponding blockReader to
* null and never read from it again.
*
* @param i the array index of sources
* @param offsetInBlock offset for the internal block
* @return StripedReader
*/
private StripedReader addStripedReader(int i,long offsetInBlock) {
StripedReader reader = new StripedReader(liveIndices[i]);
stripedReaders.add(reader);
BlockReader blockReader = newBlockReader(
getBlock(blockGroup,liveIndices[i]),offsetInBlock,sources[i]);
if (blockReader != null) {
initChecksumAndBufferSizeIfNeeded(blockReader);
reader.blockReader = blockReader;
}
reader.buffer = allocateBuffer(bufferSize);
return reader;
}
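Following the javadoc above, a minimal sketch of how the minimum set of striped readers might be allocated at the start of a reconstruction; minRequiredSources and the 0 starting offset are assumptions for illustration, not code from the source:
// allocate one StripedReader per live source, in the same order as sources
for (int i = 0; i < minRequiredSources; i++) {
StripedReader sr = addStripedReader(i, 0);
if (sr.blockReader == null) {
// this source could not be opened; a replacement source DN
// would be scheduled later
}
}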