The following are example usages of org.apache.hadoop.hdfs.DFSUtil. You can also click through to view the source code on GitHub, or leave a comment in the sidebar.
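Before the longer examples, here is a minimal sketch of the two DFSUtil helpers that recur throughout the snippets below, the UTF-8 string/byte conversions (a toy round-trip, not taken from any of the examples):

// HDFS stores path components as UTF-8 byte arrays; these helpers convert
// back and forth between String and byte[].
byte[] raw = DFSUtil.string2Bytes("foo");
String back = DFSUtil.bytes2String(raw); // "foo"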
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(final Token<?> token) throws IOException {
  // update the kerberos credentials, if they are coming from a keytab
  UserGroupInformation connectUgi = ugi.getRealUser();
  if (connectUgi == null) {
    connectUgi = ugi;
  }
  try {
    return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
      @Override
      public Long run() throws Exception {
        InetSocketAddress serviceAddr = SecurityUtil
            .getTokenServiceAddr(token);
        return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
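The doAs block above is the standard Hadoop proxy-user pattern: when the current UGI wraps a real (keytab) user, the RPC runs as that real user. A minimal sketch of the same pattern, where renewSomething() is a hypothetical stand-in for the privileged call:

final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
final UserGroupInformation connectUgi =
    ugi.getRealUser() != null ? ugi.getRealUser() : ugi;
// The action runs with the real user's credentials.
long result = connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
  @Override
  public Long run() throws Exception {
    return renewSomething(); // hypothetical privileged call
  }
});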
/**
 * Verify a child's name against the fs limit.
 *
 * @param childName byte[] containing the new child name
 * @param parentPath String containing the parent path
 * @throws PathComponentTooLongException if the child's name is too long.
 */
void verifyMaxComponentLength(byte[] childName, String parentPath)
    throws PathComponentTooLongException {
  if (maxComponentLength == 0) {
    return;
  }
  final int length = childName.length;
  if (length > maxComponentLength) {
    final PathComponentTooLongException e = new PathComponentTooLongException(
        maxComponentLength, length, parentPath,
        DFSUtil.bytes2String(childName));
    if (namesystem.isImageLoaded()) {
      throw e;
    } else {
      // Do not throw if the edits log is still being processed
      NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e);
    }
  }
}
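The maxComponentLength checked above is driven by configuration. A sketch of setting it, assuming the usual dfs.namenode.fs-limits.max-component-length key (a value of 0 disables the check in some releases):

Configuration conf = new HdfsConfiguration();
// Cap each path component at 255 bytes.
conf.setInt("dfs.namenode.fs-limits.max-component-length", 255);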
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);
  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);
  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);
  this.httpServer = builder.build();
  this.httpServer.start();
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }
  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
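DFSUtil.getHttpPolicy above determines which connectors the server exposes. A sketch of selecting the policy through configuration, assuming the standard dfs.http.policy key with values HTTP_ONLY, HTTPS_ONLY or HTTP_AND_HTTPS:

Configuration conf = new HdfsConfiguration();
conf.set("dfs.http.policy", "HTTP_AND_HTTPS");
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
// Both connectors are now enabled, so both ports get assigned above.
assert policy.isHttpEnabled() && policy.isHttpsEnabled();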
public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
  if (entry == null) {
    return null;
  }
  ByteString sourcePath = ByteString
      .copyFrom(entry.getSourcePath() == null ? DFSUtil.EMPTY_BYTES : entry
          .getSourcePath());
  String modification = entry.getType().getLabel();
  SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
      .newBuilder().setFullpath(sourcePath)
      .setModificationLabel(modification);
  if (entry.getType() == DiffType.RENAME) {
    ByteString targetPath = ByteString
        .copyFrom(entry.getTargetPath() == null ? DFSUtil.EMPTY_BYTES : entry
            .getTargetPath());
    builder.setTargetPath(targetPath);
  }
  return builder.build();
}
/**
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @param fileNamePrefix Prefix used for the generated file names
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if (nCount <= 0) {
    return new INodeFile[1];
  }
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte) 0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }
  return iNodes;
}
private String toString(boolean validateObject) {
  if (validateObject) {
    validate();
  }
  final StringBuilder b = new StringBuilder(getClass().getSimpleName())
      .append(": path = ").append(DFSUtil.byteArray2PathString(path))
      .append("\n inodes = ");
  if (inodes == null) {
    b.append("null");
  } else if (inodes.length == 0) {
    b.append("[]");
  } else {
    b.append("[").append(toString(inodes[0]));
    for (int i = 1; i < inodes.length; i++) {
      b.append(", ").append(toString(inodes[i]));
    }
    b.append("], length=").append(inodes.length);
  }
  b.append("\n isSnapshot = ").append(isSnapshot)
      .append("\n snapshotId = ").append(snapshotId);
  return b.toString();
}
public static SnapshottableDirectoryStatusProto convert(
    SnapshottableDirectoryStatus status) {
  if (status == null) {
    return null;
  }
  int snapshotNumber = status.getSnapshotNumber();
  int snapshotQuota = status.getSnapshotQuota();
  byte[] parentFullPath = status.getParentFullPath();
  ByteString parentFullPathBytes = ByteString.copyFrom(
      parentFullPath == null ? DFSUtil.EMPTY_BYTES : parentFullPath);
  HdfsFileStatusProto fs = convert(status.getDirStatus());
  SnapshottableDirectoryStatusProto.Builder builder =
      SnapshottableDirectoryStatusProto
          .newBuilder().setSnapshotNumber(snapshotNumber)
          .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
          .setDirStatus(fs);
  return builder.build();
}
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may be running zkfc on a node other than the namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
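For the HAUtil.isHAEnabled check above to pass, the configuration must declare a nameservice with at least two NameNode IDs. A minimal sketch; the nameservice and host names are illustrative:

Configuration conf = new HdfsConfiguration();
conf.set("dfs.nameservices", "ns1");
conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.ns1.nn1", "host1.example.com:8020");
conf.set("dfs.namenode.rpc-address.ns1.nn2", "host2.example.com:8020");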
private NamespaceInfo handshake(Configuration conf) throws IOException {
  // connect to name node
  InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
  this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  // get version and id info from the name-node
  NamespaceInfo nsInfo = null;
  while (!isStopRequested()) {
    try {
      nsInfo = handshake(namenode);
      break;
    } catch (SocketTimeoutException e) { // name-node is busy
      LOG.info("Problem connecting to server: " + nnAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // log the interrupt itself, not the earlier timeout
        LOG.warn("Encountered exception ", ie);
      }
    }
  }
  return nsInfo;
}
public static void main(String argv[]) throws Exception {
  if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
    System.exit(0);
  }
  try {
    StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
    NameNode namenode = createNameNode(argv, null);
    if (namenode != null) {
      namenode.join();
    }
  } catch (Throwable e) {
    LOG.error("Failed to start namenode.", e);
    terminate(1, e);
  }
}
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();
  // For the datanode proxy, the server principal should be the DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
  // Create the client
  ClientDatanodeProtocol dnProtocol =
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
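Once obtained, the proxy can drive datanode-side admin operations. A hedged sketch with a hypothetical address, using the refreshNamenodes() call that DFSAdmin issues over this same protocol:

ClientDatanodeProtocol dn = getDataNodeProxy("dn1.example.com:50020");
// Ask the datanode to reload its list of namenodes from configuration.
dn.refreshNamenodes();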
/**
 * Sets the required configurations for performing failover
 */
public static void setFailoverConfigurations(Configuration conf,
    String logicalName, InetSocketAddress nnAddr1,
    InetSocketAddress nnAddr2) {
  String nameNodeId1 = "nn1";
  String nameNodeId2 = "nn2";
  String address1 = "hdfs://" + nnAddr1.getHostName() + ":" + nnAddr1.getPort();
  String address2 = "hdfs://" + nnAddr2.getHostName() + ":" + nnAddr2.getPort();
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      logicalName, nameNodeId1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      logicalName, nameNodeId2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, logicalName),
      nameNodeId1 + "," + nameNodeId2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + logicalName);
}
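A sketch of invoking the helper above; the logical name and socket addresses are illustrative:

Configuration conf = new Configuration();
InetSocketAddress nn1 = new InetSocketAddress("nn1.example.com", 8020);
InetSocketAddress nn2 = new InetSocketAddress("nn2.example.com", 8020);
setFailoverConfigurations(conf, "mycluster", nn1, nn2);
// Clients may now address the pair as hdfs://mycluster and fail over
// between the two RPC addresses via ConfiguredFailoverProxyProvider.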
public static void main(String[] args) throws Exception {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.addResource(new OzoneConfiguration());
  int res = ToolRunner.run(new OzoneGetConf(conf), args);
  System.exit(res);
}
/**
 * Create a test configuration that will exercise the initializeGenericKeys
 * code path. This is a regression test for HDFS-4279.
 */
static void setupRecoveryTestConf(Configuration conf) throws IOException {
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      "ns1"), "nn1,nn2");
  String baseDir = System.getProperty(
      MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
  File nameDir = new File(baseDir, "nameR");
  File secondaryDir = new File(baseDir, "namesecondaryR");
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
      nameDir.getCanonicalPath());
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
      secondaryDir.getCanonicalPath());
  conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  FileUtils.deleteQuietly(nameDir);
  if (!nameDir.mkdirs()) {
    throw new RuntimeException("failed to make directory " +
        nameDir.getAbsolutePath());
  }
  FileUtils.deleteQuietly(secondaryDir);
  if (!secondaryDir.mkdirs()) {
    throw new RuntimeException("failed to make directory " +
        secondaryDir.getAbsolutePath());
  }
}
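DFSUtil.addKeySuffixes, used repeatedly in this snippet, simply joins a base key with dot-separated suffixes:

// "dfs.namenode.name.dir" + ("ns1", "nn1") -> "dfs.namenode.name.dir.ns1.nn1"
String key = DFSUtil.addKeySuffixes(
    DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1");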
/**
 * Rename a directory to its prior descendant, and verify the diff report.
 */
@Test
public void testDiffReportWithRename() throws Exception {
  final Path root = new Path("/");
  final Path sdir1 = new Path(root, "dir1");
  final Path sdir2 = new Path(root, "dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  hdfs.mkdirs(bar);
  hdfs.mkdirs(sdir2);
  // create snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  // /dir1/foo/bar -> /dir2/bar
  final Path bar2 = new Path(sdir2, "bar");
  hdfs.rename(bar, bar2);
  // /dir1/foo -> /dir2/bar/foo
  final Path foo2 = new Path(bar2, "foo");
  hdfs.rename(foo, foo2);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  // let's delete /dir2 to make things more complicated
  hdfs.delete(sdir2, true);
  verifyDiffReport(root, "s1", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/foo"),
          DFSUtil.string2Bytes("dir2/bar/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("dir1/foo/bar")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil
          .string2Bytes("dir1/foo/bar"), DFSUtil.string2Bytes("dir2/bar")));
}
public static void main(String[] args) throws Exception {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }
  int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
  System.exit(res);
}
public static void main(String[] args) throws Exception {
  // The -files option is also used by GenericOptionsParser.
  // Make sure it is not the first argument for fsck.
  int res = -1;
  if ((args.length == 0) || ("-files".equals(args[0]))) {
    printUsage(System.err);
    ToolRunner.printGenericCommandUsage(System.err);
  } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    res = 0;
  } else {
    res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
  }
  System.exit(res);
}
/**
 * Renaming a file/dir and then deleting the ancestor dir of the rename
 * target should be reported as deleted.
 */
@Test
public void testDiffReportWithRenameAndDelete() throws Exception {
  final Path root = new Path("/");
  final Path dir1 = new Path(root, "dir1");
  final Path dir2 = new Path(root, "dir2");
  final Path foo = new Path(dir1, "foo");
  final Path fileInFoo = new Path(foo, "file");
  final Path bar = new Path(dir2, "bar");
  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil
          .string2Bytes("dir2/bar/file")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("dir1/foo/file"),
          DFSUtil.string2Bytes("dir2/bar/file")));
  // delete bar
  hdfs.delete(bar, true);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  verifyDiffReport(root, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir1/foo/file")));
}
/**
 * Create all the necessary directories
 */
private synchronized boolean mkdirsInternal(String src,
    PermissionStatus permissions) throws IOException {
  NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
  if (isPermissionEnabled) {
    checkTraverse(src);
  }
  if (dir.isDir(src)) {
    // all the users of mkdirs() expect 'true' even if
    // a new directory is not created.
    return true;
  }
  if (isInSafeMode()) {
    throw new SafeModeException("Cannot create directory " + src, safeMode);
  }
  if (!DFSUtil.isValidName(src)) {
    throw new IOException("Invalid directory name: " + src);
  }
  if (isPermissionEnabled) {
    checkAncestorAccess(src, FsAction.WRITE);
  }
  // validate that we have enough inodes. This is, at best, a
  // heuristic because the mkdirs() operation might need to
  // create multiple inodes.
  checkFsObjectLimit();
  if (!dir.mkdirs(src, permissions, false, now())) {
    throw new IOException("Invalid directory name: " + src);
  }
  return true;
}
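DFSUtil.isValidName is the gatekeeper here. A small sketch of what it accepts and rejects, to the best of my reading of its contract (absolute paths only, no "." or ".." components):

assert DFSUtil.isValidName("/user/alice");      // absolute: valid
assert !DFSUtil.isValidName("user/alice");      // relative: invalid
assert !DFSUtil.isValidName("/user/../alice");  // ".." component: invalid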
public void testString(String str) throws Exception {
  String pathString = str;
  byte[][] oldPathComponents = INode.getPathComponents(pathString);
  byte[][] newPathComponents =
      DFSUtil.bytes2byteArray(pathString.getBytes(Charsets.UTF_8),
          (byte) Path.SEPARATOR_CHAR);
  if (oldPathComponents[0] == null) {
    assertTrue(oldPathComponents[0] == newPathComponents[0]);
  } else {
    assertTrue("Path components do not match for " + pathString,
        Arrays.deepEquals(oldPathComponents, newPathComponents));
  }
}
private static CacheDirectiveInfo.Expiration parseExpirationString(String ttlString)
    throws IOException {
  CacheDirectiveInfo.Expiration ex = null;
  if (ttlString != null) {
    if (ttlString.equalsIgnoreCase("never")) {
      ex = CacheDirectiveInfo.Expiration.NEVER;
    } else {
      long ttl = DFSUtil.parseRelativeTime(ttlString);
      ex = CacheDirectiveInfo.Expiration.newRelative(ttl);
    }
  }
  return ex;
}
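DFSUtil.parseRelativeTime, used above, converts a duration string with an s, m, h or d suffix into milliseconds. A quick sketch, assuming that suffix set:

// "30m" -> 30 * 60 * 1000 ms
long ttl = DFSUtil.parseRelativeTime("30m"); // 1800000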
/**
 * Run a balancer
 * @param args Command line arguments
 */
public static void main(String[] args) {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }
  try {
    System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
  } catch (Throwable e) {
    LOG.error("Exiting balancer due to an exception", e);
    System.exit(-1);
  }
}
public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
    Path path) {
  return new LocatedFileStatus(getLen(), isDir(), getReplication(),
      getBlockSize(), getModificationTime(),
      getAccessTime(),
      getPermission(), getOwner(), getGroup(),
      isSymlink() ? new Path(getSymlink()) : null,
      (getFullPath(path)).makeQualified(
          defaultUri, null), // fully-qualify path
      DFSUtil.locatedBlocks2Locations(getBlockLocations()));
}
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);
  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize,
    final ExcludeDatanodesParam excludeDatanodes,
    final NewLengthParam newLength
    ) throws IOException, URISyntaxException {
  final NameNode namenode = (NameNode) context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);
  switch (op.getValue()) {
  case APPEND: {
    final URI uri = redirectURI(namenode, ugi, delegation, username,
        doAsUser, fullpath, op.getValue(), -1L, -1L,
        excludeDatanodes.getValue(), bufferSize);
    return Response.temporaryRedirect(uri)
        .type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT: {
    np.concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  case TRUNCATE: {
    // We treat each REST request as a separate client.
    final boolean b = np.truncate(fullpath, newLength.getValue(),
        "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
private static boolean existsInDiffReport(List<DiffReportEntry> entries,
    DiffType type, String sourcePath, String targetPath) {
  for (DiffReportEntry entry : entries) {
    if (entry.equals(new DiffReportEntry(type, DFSUtil
        .string2Bytes(sourcePath), targetPath == null ? null : DFSUtil
        .string2Bytes(targetPath)))) {
      return true;
    }
  }
  return false;
}
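A sketch of how this helper might be driven from a test: fetch the diff report between two snapshots and probe it for a specific rename. The paths and snapshot names are illustrative:

SnapshotDiffReport report = hdfs.getSnapshotDiffReport(root, "s1", "s2");
boolean renamed = existsInDiffReport(report.getDiffList(),
    DiffType.RENAME, "dir1/foo", "dir2/bar/foo");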