微信公众号搜"智元新知"关注
微信扫一扫可直接关注哦!

Example usages of org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff

Project: hadoop-plus — File: SnapshotFSImageFormat.java
/**
 * Load a node stored in the created list from fsimage.
 * @param creatednodeName The name of the created node.
 * @param parent The directory that the created list belongs to.
 * @return The created node.
 */
/**
 * Load a node stored in the created list from fsimage.
 *
 * @param createdNodeName The name of the created node.
 * @param parent The directory that the created list belongs to.
 * @return The created node.
 * @throws IOException if no INode with the given name can be found either in
 *         a posterior snapshot diff or among the current children.
 */
private static INode loadCreated(byte[] createdNodeName,
    INodeDirectoryWithSnapshot parent) throws IOException {
  // The INode in the created list should be a reference to another INode
  // in posterior SnapshotDiffs or one of the current children.
  for (DirectoryDiff postDiff : parent.getDiffs()) {
    // Look for the node in the deleted list of each later diff first.
    final INode d = postDiff.getChildrenDiff().search(ListType.DELETED,
        createdNodeName);
    if (d != null) {
      return d;
    } // else go to the next SnapshotDiff
  }
  // Not found in any posterior diff: fall back to the current child.
  INode currentChild = parent.getChild(createdNodeName, null);
  if (currentChild == null) {
    throw new IOException("Cannot find an INode associated with the INode "
        + DFSUtil.bytes2String(createdNodeName)
        + " in created list while loading FSImage.");
  }
  return currentChild;
}
项目:hadoop-TCP    文件SnapshotFSImageFormat.java   
/**
 * Load a node stored in the created list from fsimage.
 * @param creatednodeName The name of the created node.
 * @param parent The directory that the created list belongs to.
 * @return The created node.
 */
private static INode loadCreated(byte[] creatednodeName,null);
  if (currentChild == null) {
    throw new IOException("Cannot find an INode associated with the INode "
        + Dfsutil.bytes2String(creatednodeName)
        + " in created list while loading FSImage.");
  }
  return currentChild;
}
项目:hardfs    文件SnapshotFSImageFormat.java   
/**
 * Load a node stored in the created list from fsimage.
 * @param creatednodeName The name of the created node.
 * @param parent The directory that the created list belongs to.
 * @return The created node.
 */
private static INode loadCreated(byte[] creatednodeName,null);
  if (currentChild == null) {
    throw new IOException("Cannot find an INode associated with the INode "
        + Dfsutil.bytes2String(creatednodeName)
        + " in created list while loading FSImage.");
  }
  return currentChild;
}
Project: hadoop-plus — File: SnapshotFSImageFormat.java
/**
 * Load {@link DirectoryDiff} from fsimage.
 * @param parent The directory that the SnapshotDiff belongs to.
 * @param in The {@link DataInput} instance to read.
 * @param loader The {@link Loader} instance that this loading procedure is 
 *               using.
 * @return A {@link DirectoryDiff}.
 */
/**
 * Load a {@link DirectoryDiff} from fsimage.
 *
 * @param parent The directory that the SnapshotDiff belongs to.
 * @param in The {@link DataInput} instance to read.
 * @param loader The {@link Loader} instance that this loading procedure is
 *               using.
 * @return A {@link DirectoryDiff}.
 * @throws IOException if the serialized diff cannot be read.
 */
private static DirectoryDiff loadDirectoryDiff(
    INodeDirectoryWithSnapshot parent, DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  // 1. Read the full path of the Snapshot root to identify the Snapshot
  final Snapshot snapshot = loader.getSnapshot(in);

  // 2. Load DirectoryDiff#childrenSize
  int childrenSize = in.readInt();

  // 3. Load DirectoryDiff#snapshotINode
  INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff(
      snapshot, in, loader);

  // 4. Load the created list in SnapshotDiff#Diff
  List<INode> createdList = loadCreatedList(parent, in);

  // 5. Load the deleted list in SnapshotDiff#Diff.
  // NOTE(review): the scraped copy dropped the "in" argument here and the
  // "createdList" argument below; both restored from the upstream signature.
  List<INode> deletedList = loadDeletedList(parent, createdList, in, loader);

  // 6. Compose the SnapshotDiff, chaining it before the current first diff
  // (or null when this is the only diff).
  List<DirectoryDiff> diffs = parent.getDiffs().asList();
  DirectoryDiff sdiff = new DirectoryDiff(snapshot, snapshotINode,
      diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
      deletedList);
  return sdiff;
}
Project: hadoop-plus — File: TestSnapshotRename.java
/**
 * Check the correctness of snapshot list within
 * {@link INodeDirectorySnapshottable}
 */
/**
 * Check the correctness of the snapshot list within
 * {@link INodeDirectorySnapshottable}.
 *
 * @param srcRoot the snapshottable directory under test
 * @param sortednames expected snapshot names in name-sorted order
 * @param names expected snapshot names in creation-time order
 */
private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
    String[] sortednames, String[] names) {
  // Verify the name-sorted view of the snapshots.
  final ReadOnlyList<Snapshot> byName = srcRoot.getSnapshotsByNames();
  assertEquals(sortednames.length, byName.size());
  int idx = 0;
  for (String expected : sortednames) {
    assertEquals(expected, byName.get(idx++).getRoot().getLocalName());
  }
  // Verify the creation-time-ordered view via the directory diff list.
  final List<DirectoryDiff> byTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, byTime.size());
  idx = 0;
  for (String expected : names) {
    assertEquals(expected,
        byTime.get(idx++).getSnapshot().getRoot().getLocalName());
  }
}
项目:hadoop-TCP    文件SnapshotFSImageFormat.java   
/**
 * Load {@link DirectoryDiff} from fsimage.
 * @param parent The directory that the SnapshotDiff belongs to.
 * @param in The {@link DataInput} instance to read.
 * @param loader The {@link Loader} instance that this loading procedure is 
 *               using.
 * @return A {@link DirectoryDiff}.
 */
private static DirectoryDiff loadDirectoryDiff(
    INodeDirectoryWithSnapshot parent,deletedList);
  return sdiff;
}
项目:hadoop-TCP    文件TestSnapshotRename.java   
/**
 * Check the correctness of snapshot list within
 * {@link INodeDirectorySnapshottable}
 */
private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,listByTime.get(i).getSnapshot().getRoot().getLocalName());
  }
}
项目:hardfs    文件SnapshotFSImageFormat.java   
/**
 * Load {@link DirectoryDiff} from fsimage.
 * @param parent The directory that the SnapshotDiff belongs to.
 * @param in The {@link DataInput} instance to read.
 * @param loader The {@link Loader} instance that this loading procedure is 
 *               using.
 * @return A {@link DirectoryDiff}.
 */
private static DirectoryDiff loadDirectoryDiff(
    INodeDirectoryWithSnapshot parent,deletedList);
  return sdiff;
}
项目:hardfs    文件TestSnapshotRename.java   
/**
 * Check the correctness of snapshot list within
 * {@link INodeDirectorySnapshottable}
 */
private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,listByTime.get(i).getSnapshot().getRoot().getLocalName());
  }
}
Project: hadoop-plus — File: TestFSImageWithSnapshot.java
/**
 * Test when there is snapshot taken on root
 */
@Test
/**
 * Test when there is a snapshot taken on root: the snapshot must survive
 * cluster restarts and namespace save/reload cycles.
 */
@Test
public void testSnapshotOnRoot() throws Exception {
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  // Restart the cluster (without reformatting) so the edit log holding the
  // snapshot operations is replayed.
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  INodeDirectorySnapshottable rootNode =
      (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
  assertTrue("The children list of root should be empty",
      rootNode.getChildrenList(null).isEmpty());
  // one snapshot on root: s1
  List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName());

  // check SnapshotManager's snapshottable directory list
  assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
  SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager()
      .getSnapshottableDirListing(null);
  assertEquals(root, sdirs[0].getFullPath());

  // save namespace and restart cluster once more to check the fsimage
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
Project: hadoop-plus — File: TestRenameWithSnapshots.java
/**
 * Test the undo section of rename. Before the rename,we create the renamed 
 * file/dir before taking the snapshot.
 */
@Test
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir before taking the snapshot.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  // Force addChild on dir2 to fail so that the rename has to be undone.
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
      (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());

  // after the undo of rename, both the created and deleted list of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
  List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode)
      .getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());

  // foo in snapshot s1 should still be the same inode as the live foo
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertTrue(fooNode_s1 == fooNode);

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
项目:hadoop-plus    文件TestRenameWithSnapshots.java   
/**
 * Test the undo section of rename. Before the rename,we create the renamed 
 * file/dir after taking the snapshot.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2,"s1");

  // create foo after taking snapshot
  final Path foo = new Path(sdir1,SEED);

  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(),the created list of sdir1 should contain 
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1,childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);

  final Path foo_s1 = SnapshottestHelper.getSnapshotPath(sdir1,"foo");
  assertFalse(hdfs.exists(foo_s1));

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1,dir2Children.get(0).getLocalName());
}
项目:hadoop-plus    文件TestRenameWithSnapshots.java   
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test,"dir1");
  final Path dir2 = new Path(test,"dir2");
  final Path subdir2 = new Path(dir2,"subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);

  final Path foo = new Path(dir1,dir1,"s1");
  SnapshottestHelper.createSnapshot(hdfs,dir2,"s2");

  // set ns quota of dir2 to 5,so the current remaining is 2 (already has
  // dir2,subdir2,and s2)
  hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);

  final Path foo2 = new Path(subdir2,foo.getName());
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since foo/bar only be counted 
  // as 2 in NS quota. However,the rename operation will fail when adding
  // foo to subdir2,since we will create a snapshot diff for subdir2. 
  boolean rename = hdfs.rename(foo,foo2);
  assertFalse(rename);

  // check the undo
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(null));
  assertEquals(1,childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
  INode barNode = fsdir.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode,barNode.getParent());
  List<DirectoryDiff> diffList = ((INodeDirectorySnapshottable) dir1Node)
      .getDiffs().asList();
  assertEquals(1,diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(3,counts.get(Quota.NAMESPACE));
  assertEquals(0,counts.get(Quota.disKSPACE));
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(null));
  assertEquals(1,childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node,subdir2Node.getParent());
  assertSame(subdir2Node,fsdir.getINode4Write(subdir2.toString()));
  diffList = ((INodeDirectorySnapshottable) dir2Node)
      .getDiffs().asList();
  assertEquals(1,diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
项目:hadoop-plus    文件TestRenameWithSnapshots.java   
/**
 * Test the rename undo when removing dst node fails
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test,"dir2");
  final Path sub_dir2 = new Path(dir2,"subdir");
  final Path subsub_dir2 = new Path(sub_dir2,"subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);

  final Path foo = new Path(dir1,"foo");
  hdfs.mkdirs(foo);

  SnapshottestHelper.createSnapshot(hdfs,"s2");

  // set ns quota of dir2 to 4,so the current remaining is 0 (already has
  // dir2,sub_dir2,subsub_dir2,4,Long.MAX_VALUE - 1);

  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2. 
  // FSDirectory#verifyQuota4Rename will pass since foo only be counted 
  // as 1 in NS quota. However,the rename operation will fail when removing
  // subsub_dir2 since this step tries to add a snapshot diff in sub_dir2.
  try {
    hdfs.rename(foo,Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (QuotaExceededException e) {
    String msg = "Failed to record modification for snapshot: "
        + "The NameSpace quota (directories and files)"
        + " is exceeded: quota=4 file count=5"; 
    GenericTestUtils.assertExceptionContains(msg,e);
  }

  // check the undo
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(null));
  assertEquals(1,childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
  assertSame(dir1Node,fooNode.getParent());
  List<DirectoryDiff> diffList = ((INodeDirectorySnapshottable) dir1Node)
      .getDiffs().asList();
  assertEquals(1,diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(4,childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertTrue(subdir2Node.getClass() == INodeDirectoryWithSnapshot.class);
  assertSame(dir2Node,fsdir.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node,subsubdir2Node.getParent());

  diffList = ((INodeDirectorySnapshottable) dir2Node).getDiffs().asList();
  assertEquals(1,diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  diffList = ((INodeDirectoryWithSnapshot) subdir2Node).getDiffs().asList();
  assertEquals(0,diffList.size());
}
项目:hadoop-plus    文件TestRenameWithSnapshots.java   
/**
 * Test rename to an invalid name (xxx/.snapshot)
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root,SEED);

  // create a snapshot on root
  SnapshottestHelper.createSnapshot(hdfs,root,snap1);

  // rename bar to /foo/.snapshot which is invalid
  final Path invalid = new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR);
  try {
    hdfs.rename(bar,invalid);
    fail("expect exception since invalid name is used for rename");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("\"" +
        HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e);
  }

  // check
  INodeDirectoryWithSnapshot fooNode = (INodeDirectoryWithSnapshot) fsdir
      .getINode4Write(foo.toString());
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(1,children.size());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1,diffList.size());
  DirectoryDiff diff = diffList.get(0);
  // this diff is generated while renaming
  assertEquals(snap1,Snapshot.getSnapshotName(diff.snapshot));
  // after undo,the diff should be empty
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());

  // bar was converted to filewithsnapshot while renaming
  INodeFileWithSnapshot barNode = (INodeFileWithSnapshot) fsdir
      .getINode4Write(bar.toString());
  assertSame(barNode,children.get(0));
  assertSame(fooNode,barNode.getParent());
  List<FileDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1,barDiffList.size());
  FileDiff barDiff = barDiffList.get(0);
  assertEquals(snap1,Snapshot.getSnapshotName(barDiff.snapshot));

  // restart cluster multiple times to make sure the fsimage and edits log are
  // correct. Note that when loading fsimage,foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDatanodes(REPL).build();
  cluster.waitactive();
  restartClusterandCheckImage(true);
}
项目:hadoop-plus    文件TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1,SEED);
  hdfs.mkdirs(sdir2);

  SnapshottestHelper.createSnapshot(hdfs,sdir2,"s2");

  final Path foo2 = new Path(sdir2,"foo");
  hdfs.rename(foo,foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2,"bar2");
  DFSTestUtil.createFile(hdfs,bar2,SEED);
  final Path bar3 = new Path(foo2,"bar3");
  DFSTestUtil.createFile(hdfs,bar3,SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2,"s3");

  // delete foo2
  hdfs.delete(foo2,true);
  // delete s3
  hdfs.deleteSnapshot(sdir2,"s3");

  // check
  final INodeDirectorySnapshottable dir1Node = 
      (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
  assertEquals(4,dir1Node.getNamespace());
  final INodeDirectorySnapshottable dir2Node = 
      (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
  assertEquals(2,dir2Node.getNamespace());

  final Path foo_s1 = SnapshottestHelper.getSnapshotPath(sdir1,foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1,wc.getReferenceCount());
  INodeDirectoryWithSnapshot fooNode = 
      (INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(1,children.size());
  assertEquals(bar.getName(),children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1,Snapshot.getSnapshotName(diffList.get(0).snapshot));
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0,diff.getList(ListType.CREATED).size());
  assertEquals(0,diff.getList(ListType.DELETED).size());

  restartClusterandCheckImage(true);
}
项目:hadoop-plus    文件TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1,"s3");

  // rename foo2 again
  hdfs.rename(foo2,foo);
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2,"s3");

  // check
  final INodeDirectorySnapshottable dir1Node = 
      (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  assertEquals(9,foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(2,wc.getReferenceCount());
  INodeDirectoryWithSnapshot fooNode = 
      (INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(3,children.get(0).getLocalName());
  assertEquals(bar2.getName(),children.get(1).getLocalName());
  assertEquals(bar3.getName(),children.get(2).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1,Snapshot.getSnapshotName(diffList.get(0).snapshot));
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2,diff.getList(ListType.DELETED).size());

  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 = 
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc,wc2);
  assertSame(fooRef2,wc.getParentReference());

  restartClusterandCheckImage(true);
}
项目:hadoop-plus    文件TestSetQuotaWithSnapshot.java   
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 */
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);

  hdfs.allowSnapshot(dir);
  hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET,HdfsConstants.QUOTA_DONT_SET);
  INode dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(0,((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
      .size());

  hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET - 1,HdfsConstants.QUOTA_DONT_SET - 1);
  dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(0,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
  dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(0,((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
      .size());

  // allow snapshot on dir and create snapshot s1
  SnapshottestHelper.createSnapshot(hdfs,dir,"s1");

  // clear quota of dir
  hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET);
  // dir should still be a snapshottable directory
  dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(1,((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
      .size());
  SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
  assertEquals(1,status.length);
  assertEquals(dir,status[0].getFullPath());

  final Path subDir = new Path(dir,"sub");
  hdfs.mkdirs(subDir);
  hdfs.createSnapshot(dir,"s2");
  final Path file = new Path(subDir,file,REPLICATION,seed);
  hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET);
  INode subNode = fsdir.getINode4Write(subDir.toString());
  assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
  List<DirectoryDiff> diffList = ((INodeDirectoryWithSnapshot) subNode).getDiffs().asList();
  assertEquals(1,diffList.size());
  assertEquals("s2",Snapshot.getSnapshotName(diffList.get(0).snapshot));
  List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
  assertEquals(1,createdList.size());
  assertSame(fsdir.getINode4Write(file.toString()),createdList.get(0));
}
项目:hadoop-TCP    文件TestFSImageWithSnapshot.java   
/**
 * Test when there is snapshot taken on root
 */
@Test
public void testSnapshotOnRoot() throws Exception {
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root,sdirs[0].getFullPath());

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDatanodes(REPLICATION).build();
  cluster.waitactive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
项目:hadoop-TCP    文件TestINodeFileUnderConstructionWithSnapshot.java   
/**
 * Test snapshot during file appending,before the corresponding
 * {@link FSDataOutputStream} instance closes.
 */
@Test (timeout=60000)
public void testSnapshotWhileAppending() throws Exception {
  Path file = new Path(dir,seed);

  // 1. append without closing stream --> create snapshot
  HdfsDataOutputStream out = appendFileWithoutClosing(file,BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  SnapshottestHelper.createSnapshot(hdfs,"s0");
  out.close();

  // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
  // deleted list,with size BLOCKSIZE*2
  INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
  assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize());
  INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
      .getINode(dir.toString());
  DirectoryDiff last = dirNode.getDiffs().getLast();
  Snapshot s0 = last.snapshot;

  // 2. append without closing stream
  out = appendFileWithoutClosing(file,BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // re-check nodeInDeleted_S0
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize(s0));

  // 3. take snapshot --> close stream
  hdfs.createSnapshot(dir,"s1");
  out.close();

  // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
  // have been stored in s1's deleted list
  fileNode = (INodeFile) fsdir.getINode(file.toString());
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  last = dirNode.getDiffs().getLast();
  Snapshot s1 = last.snapshot;
  assertTrue(fileNode instanceof INodeFileWithSnapshot);
  assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(s1));

  // 4. modify file --> append without closing stream --> take snapshot -->
  // close stream
  hdfs.setReplication(file,(short) (REPLICATION - 1));
  out = appendFileWithoutClosing(file,BLOCKSIZE);
  hdfs.createSnapshot(dir,"s2");
  out.close();

  // re-check the size of nodeInDeleted_S1
  assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(s1));
}
项目:hadoop-TCP    文件TestRenameWithSnapshots.java   
/**
 * Test the undo section of rename. Before the rename,dir2Children.get(0).getLocalName());
}
项目:hadoop-TCP    文件TestRenameWithSnapshots.java   
/**
 * Test the undo section of rename. Before the rename,dir2Children.get(0).getLocalName());
}
项目:hadoop-TCP    文件TestRenameWithSnapshots.java   
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test,diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
项目:hadoop-TCP    文件TestRenameWithSnapshots.java   
/**
 * Test the rename undo when removing dst node fails
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test,diffList.size());
}
项目:hadoop-TCP    文件TestRenameWithSnapshots.java   
/**
 * Test rename to an invalid name (xxx/.snapshot)
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root,foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDatanodes(REPL).build();
  cluster.waitactive();
  restartClusterandCheckImage(true);
}
项目:hadoop-TCP    文件TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
// NOTE(review): snippet truncated by HTML extraction -- the line below fuses a
// Path construction with the tail of an assertion on the DELETED diff list
// size; the operations described in the javadoc are missing from this view.
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1,diff.getList(ListType.DELETED).size()); // fused line

  restartClusterandCheckImage(true);
}
项目:hadoop-TCP    文件TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
// NOTE(review): snippet truncated by HTML extraction -- the line below fuses a
// Path construction with the tail of an assertion involving
// wc.getParentReference(); the rename/snapshot operations are missing here.
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1,wc.getParentReference()); // fused line

  restartClusterandCheckImage(true);
}
项目:hadoop-TCP    文件TestSetQuotaWithSnapshot.java   
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 */
// NOTE(review): snippet truncated by HTML extraction -- the setQuota line
// below fuses the call's first argument with the tail of an assertion on
// createdList.get(0); the quota values and verification steps are missing.
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);

  hdfs.allowSnapshot(dir);
  hdfs.setQuota(dir,createdList.get(0)); // fused line (extraction artifact)
}
项目:hardfs    文件TestFSImageWithSnapshot.java   
/**
 * Test when there is snapshot taken on root
 */
// NOTE(review): snippet truncated by HTML extraction -- the createSnapshot
// line fuses the call with the tail of an assertion on
// sdirs[0].getFullPath(). The surviving tail saves the namespace, restarts
// the MiniDFSCluster with the existing image, and re-acquires fsn/hdfs.
@Test
public void testSnapshotOnRoot() throws Exception {
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root,sdirs[0].getFullPath()); // fused line

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDatanodes(REPLICATION).build();
  cluster.waitactive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
项目:hardfs    文件TestINodeFileUnderConstructionWithSnapshot.java   
/**
 * Test snapshot during file appending,fileNode.computeFileSize(s1));
}
项目:hardfs    文件TestRenameWithSnapshots.java   
/**
 * Test the undo section of rename. Before the rename,dir2Children.get(0).getLocalName());
}
项目:hardfs    文件TestRenameWithSnapshots.java   
/**
 * Test the undo section of rename. Before the rename,dir2Children.get(0).getLocalName());
}
项目:hardfs    文件TestRenameWithSnapshots.java   
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 */
// NOTE(review): snippet truncated by HTML extraction -- the line below fuses
// the start of a Path construction with the tail of an assertEquals(...,
// diffList.size()) call from a removed middle section; do not treat as source.
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test,diffList.size()); // fused line (extraction artifact)
  diff = diffList.get(0);
  // Surviving tail: after the undo, the remaining DirectoryDiff appears to
  // carry empty created/deleted children lists -- confirm against upstream.
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
项目:hardfs    文件TestRenameWithSnapshots.java   
/**
 * Test the rename undo when removing dst node fails
 */
// NOTE(review): body gutted by HTML extraction -- the line below fuses a Path
// construction with the tail of an assertion on diffList.size(); the actual
// test logic is missing and must be recovered from the upstream repository.
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test,diffList.size()); // fused line (extraction artifact)
}
项目:hardfs    文件TestRenameWithSnapshots.java   
/**
 * Test rename to an invalid name (xxx/.snapshot)
 */
// NOTE(review): snippet truncated by HTML extraction -- the line below fuses a
// Path construction with the middle of a prose comment ("foo and bar will be
// converted ..."). Only the tail survives: it saves the namespace, restarts
// the MiniDFSCluster, and re-verifies the fsimage via
// restartClusterandCheckImage(true).
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root,foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDatanodes(REPL).build();
  cluster.waitactive();
  restartClusterandCheckImage(true);
}
项目:hardfs    文件TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
// NOTE(review): snippet truncated by HTML extraction -- the line below fuses a
// Path construction with the tail of an assertion on the DELETED diff list
// size; the operations described in the javadoc are missing from this view.
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1,diff.getList(ListType.DELETED).size()); // fused line

  restartClusterandCheckImage(true);
}
项目:hardfs    文件TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
// NOTE(review): snippet truncated by HTML extraction -- the line below fuses a
// Path construction with the tail of an assertion involving
// wc.getParentReference(); the rename/snapshot operations are missing here.
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1,wc.getParentReference()); // fused line

  restartClusterandCheckImage(true);
}
项目:hardfs    文件TestSetQuotaWithSnapshot.java   
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 */
// NOTE(review): snippet truncated by HTML extraction -- the setQuota line
// below fuses the call's first argument with the tail of an assertion on
// createdList.get(0); the quota values and verification steps are missing.
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);

  hdfs.allowSnapshot(dir);
  hdfs.setQuota(dir,createdList.get(0)); // fused line (extraction artifact)
}

版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。