Subject: svn commit: r1480838 [2/7] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/...
Date: Thu, 09 May 2013 23:55:52 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org
Reply-To: hdfs-dev@hadoop.apache.org

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Thu May  9 23:55:49 2013
@@ -42,28 +42,33 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
@@ -77,6 +82,10 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -85,6 +94,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
@@ -102,13 +112,13 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.security.AccessControlException;
@@ -873,5 +883,101 @@ public class ClientNamenodeProtocolTrans
   public Object getUnderlyingProxyObject() {
     return rpcProxy;
   }
+
+  @Override
+  public String createSnapshot(String snapshotRoot, String snapshotName)
+      throws IOException {
+    final CreateSnapshotRequestProto.Builder builder
+        = CreateSnapshotRequestProto.newBuilder().setSnapshotRoot(snapshotRoot);
+    if (snapshotName != null) {
+      builder.setSnapshotName(snapshotName);
+    }
+    final CreateSnapshotRequestProto req = builder.build();
+    try {
+      return rpcProxy.createSnapshot(null, req).getSnapshotPath();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+  @Override
+  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+      throws IOException {
+    DeleteSnapshotRequestProto req = DeleteSnapshotRequestProto.newBuilder()
+        .setSnapshotRoot(snapshotRoot).setSnapshotName(snapshotName).build();
+    try {
+      rpcProxy.deleteSnapshot(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void allowSnapshot(String snapshotRoot) throws IOException {
+    AllowSnapshotRequestProto req = AllowSnapshotRequestProto.newBuilder()
+        .setSnapshotRoot(snapshotRoot).build();
+    try {
+      rpcProxy.allowSnapshot(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void disallowSnapshot(String snapshotRoot) throws IOException {
+    DisallowSnapshotRequestProto req = DisallowSnapshotRequestProto
+        .newBuilder().setSnapshotRoot(snapshotRoot).build();
+    try {
+      rpcProxy.disallowSnapshot(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void renameSnapshot(String snapshotRoot, String snapshotOldName,
+      String snapshotNewName) throws IOException {
+    RenameSnapshotRequestProto req = RenameSnapshotRequestProto.newBuilder()
+        .setSnapshotRoot(snapshotRoot).setSnapshotOldName(snapshotOldName)
+        .setSnapshotNewName(snapshotNewName).build();
+    try {
+      rpcProxy.renameSnapshot(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+      throws IOException {
+    GetSnapshottableDirListingRequestProto req =
+        GetSnapshottableDirListingRequestProto.newBuilder().build();
+    try {
+      GetSnapshottableDirListingResponseProto result = rpcProxy
+          .getSnapshottableDirListing(null, req);
+
+      if (result.hasSnapshottableDirList()) {
+        return PBHelper.convert(result.getSnapshottableDirList());
+      }
+      return null;
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
+      String fromSnapshot, String toSnapshot) throws IOException {
+    GetSnapshotDiffReportRequestProto req = GetSnapshotDiffReportRequestProto
+        .newBuilder().setSnapshotRoot(snapshotRoot)
+        .setFromSnapshot(fromSnapshot).setToSnapshot(toSnapshot).build();
+    try {
+      GetSnapshotDiffReportResponseProto result =
+          rpcProxy.getSnapshotDiffReport(null, req);
+
+      return PBHelper.convert(result.getDiffReport());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }
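[Editor's note] The methods added above give the client-side translator one wrapper per snapshot RPC: each builds a request proto, calls the matching method on rpcProxy, and unwraps ServiceException into the remote IOException. For readers who want the calling side, the sketch below walks through the snapshot lifecycle against that interface. It is illustrative only: the assumption that these @Override methods come from ClientProtocol (as the imports suggest), the way the proxy is obtained, and the /user/alice/data path are not part of this commit.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class SnapshotRpcWalkthrough {

  // The ClientProtocol proxy would normally come from a DFSClient; obtaining
  // it is outside this commit, so it is simply a parameter here.
  static void demo(ClientProtocol namenode) throws IOException {
    String dir = "/user/alice/data";   // hypothetical snapshottable directory

    // Administrator step: mark the directory as snapshottable.
    namenode.allowSnapshot(dir);

    // Take two snapshots; createSnapshot returns the snapshot path.
    String s1Path = namenode.createSnapshot(dir, "s1");
    String s2Path = namenode.createSnapshot(dir, "s2");
    System.out.println(s1Path + ", " + s2Path);

    // Enumerate snapshottable directories (the translator returns null when
    // the response carries no listing).
    SnapshottableDirectoryStatus[] dirs = namenode.getSnapshottableDirListing();
    System.out.println(dirs == null ? 0 : dirs.length);

    // Ask the NameNode what changed between the two snapshots.
    SnapshotDiffReport diff = namenode.getSnapshotDiffReport(dir, "s1", "s2");
    System.out.println(diff.getDiffList().size());

    // Rename and remove the snapshots, then disable snapshots on the directory.
    namenode.renameSnapshot(dir, "s2", "s2-final");
    namenode.deleteSnapshot(dir, "s2-final");
    namenode.deleteSnapshot(dir, "s1");
    namenode.disallowSnapshot(dir);
  }
}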
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Thu May  9 23:55:49 2013
@@ -30,23 +30,26 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
@@ -64,7 +67,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
@@ -73,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
@@ -89,17 +93,21 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -122,15 +130,16 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
+import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
@@ -291,8 +300,8 @@ public class PBHelper {
   public static BlockKeyProto convert(BlockKey key) {
     byte[] encodedKey = key.getEncodedKey();
-    ByteString keyBytes = ByteString.copyFrom(encodedKey == null ? new byte[0]
-        : encodedKey);
+    ByteString keyBytes = ByteString.copyFrom(encodedKey == null ?
+        DFSUtil.EMPTY_BYTES : encodedKey);
     return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
         .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
   }
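[Editor's note] The BlockKeyProto hunk above, like the two snapshot converters further down in this file, replaces ad-hoc "new byte[0]" allocations with the shared DFSUtil.EMPTY_BYTES constant: a zero-length array never changes, so one instance can safely back every ByteString.copyFrom fallback. A minimal, self-contained illustration of the pattern follows; the class name and local constant are stand-ins, not part of the patch.

import com.google.protobuf.ByteString;

public class EmptyBytesSketch {
  // Shared zero-length array, analogous to DFSUtil.EMPTY_BYTES in the patch.
  public static final byte[] EMPTY_BYTES = {};

  // Mirrors the converters above: fall back to the shared constant instead of
  // allocating a fresh new byte[0] for every null input.
  static ByteString toByteString(byte[] maybeNull) {
    return ByteString.copyFrom(maybeNull == null ? EMPTY_BYTES : maybeNull);
  }

  public static void main(String[] args) {
    System.out.println(toByteString(null).size());              // 0
    System.out.println(toByteString(new byte[] {1, 2}).size()); // 2
  }
}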
@@ -1034,7 +1043,6 @@ public class PBHelper {
     return new EnumSetWritable<CreateFlag>(result);
   }
 
-
   public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
     if (fs == null)
       return null;
@@ -1050,6 +1058,25 @@ public class PBHelper {
         fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
   }
 
+  public static SnapshottableDirectoryStatus convert(
+      SnapshottableDirectoryStatusProto sdirStatusProto) {
+    if (sdirStatusProto == null) {
+      return null;
+    }
+    final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
+    return new SnapshottableDirectoryStatus(
+        status.getModificationTime(),
+        status.getAccessTime(),
+        PBHelper.convert(status.getPermission()),
+        status.getOwner(),
+        status.getGroup(),
+        status.getPath().toByteArray(),
+        status.getFileId(),
+        sdirStatusProto.getSnapshotNumber(),
+        sdirStatusProto.getSnapshotQuota(),
+        sdirStatusProto.getParentFullpath().toByteArray());
+  }
+
   public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
     if (fs == null)
       return null;
@@ -1085,6 +1112,25 @@
     return builder.build();
   }
 
+  public static SnapshottableDirectoryStatusProto convert(
+      SnapshottableDirectoryStatus status) {
+    if (status == null) {
+      return null;
+    }
+    int snapshotNumber = status.getSnapshotNumber();
+    int snapshotQuota = status.getSnapshotQuota();
+    byte[] parentFullPath = status.getParentFullPath();
+    ByteString parentFullPathBytes = ByteString.copyFrom(
+        parentFullPath == null ? DFSUtil.EMPTY_BYTES : parentFullPath);
+    HdfsFileStatusProto fs = convert(status.getDirStatus());
+    SnapshottableDirectoryStatusProto.Builder builder =
+        SnapshottableDirectoryStatusProto
+        .newBuilder().setSnapshotNumber(snapshotNumber)
+        .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
+        .setDirStatus(fs);
+    return builder.build();
+  }
+
   public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
     if (fs == null) return null;
     final int len = fs.length;
@@ -1326,6 +1372,104 @@ public class PBHelper {
     return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
         .setLayoutVersion(j.getLayoutVersion())
         .setNamespaceID(j.getNamespaceId()).build();
+  }
+
+  public static SnapshottableDirectoryStatus[] convert(
+      SnapshottableDirectoryListingProto sdlp) {
+    if (sdlp == null)
+      return null;
+    List<SnapshottableDirectoryStatusProto> list = sdlp
+        .getSnapshottableDirListingList();
+    if (list.isEmpty()) {
+      return new SnapshottableDirectoryStatus[0];
+    } else {
+      SnapshottableDirectoryStatus[] result =
+          new SnapshottableDirectoryStatus[list.size()];
+      for (int i = 0; i < list.size(); i++) {
+        result[i] = PBHelper.convert(list.get(i));
+      }
+      return result;
+    }
+  }
+
+  public static SnapshottableDirectoryListingProto convert(
+      SnapshottableDirectoryStatus[] status) {
+    if (status == null)
+      return null;
+    SnapshottableDirectoryStatusProto[] protos =
+        new SnapshottableDirectoryStatusProto[status.length];
+    for (int i = 0; i < status.length; i++) {
+      protos[i] = PBHelper.convert(status[i]);
+    }
+    List<SnapshottableDirectoryStatusProto> protoList = Arrays.asList(protos);
+    return SnapshottableDirectoryListingProto.newBuilder()
+        .addAllSnapshottableDirListing(protoList).build();
+  }
+
+  public static DiffReportEntry convert(SnapshotDiffReportEntryProto entry) {
+    if (entry == null) {
+      return null;
+    }
+    DiffType type = DiffType.getTypeFromLabel(entry
+        .getModificationLabel());
+    return type == null ? null :
+        new DiffReportEntry(type, entry.getFullpath().toByteArray());
+  }
+
+  public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
+    if (entry == null) {
+      return null;
+    }
+    byte[] fullPath = entry.getRelativePath();
+    ByteString fullPathString = ByteString
+        .copyFrom(fullPath == null ? DFSUtil.EMPTY_BYTES : fullPath);
+
+    String modification = entry.getType().getLabel();
+
+    SnapshotDiffReportEntryProto entryProto = SnapshotDiffReportEntryProto
+        .newBuilder().setFullpath(fullPathString)
+        .setModificationLabel(modification).build();
+    return entryProto;
+  }
+
+  public static SnapshotDiffReport convert(SnapshotDiffReportProto reportProto) {
+    if (reportProto == null) {
+      return null;
+    }
+    String snapshotDir = reportProto.getSnapshotRoot();
+    String fromSnapshot = reportProto.getFromSnapshot();
+    String toSnapshot = reportProto.getToSnapshot();
+    List<SnapshotDiffReportEntryProto> list = reportProto
+        .getDiffReportEntriesList();
+    List<DiffReportEntry> entries = new ArrayList<DiffReportEntry>();
+    for (SnapshotDiffReportEntryProto entryProto : list) {
+      DiffReportEntry entry = convert(entryProto);
+      if (entry != null)
+        entries.add(entry);
+    }
+    return new SnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot,
+        entries);
+  }
+
+  public static SnapshotDiffReportProto convert(SnapshotDiffReport report) {
+    if (report == null) {
+      return null;
+    }
+    List<DiffReportEntry> entries = report.getDiffList();
+    List<SnapshotDiffReportEntryProto> entryProtos =
+        new ArrayList<SnapshotDiffReportEntryProto>();
+    for (DiffReportEntry entry : entries) {
+      SnapshotDiffReportEntryProto entryProto = convert(entry);
+      if (entryProto != null)
+        entryProtos.add(entryProto);
+    }
+
+    SnapshotDiffReportProto reportProto = SnapshotDiffReportProto.newBuilder()
+        .setSnapshotRoot(report.getSnapshotRoot())
+        .setFromSnapshot(report.getFromSnapshot())
+        .setToSnapshot(report.getLaterSnapshotName())
+        .addAllDiffReportEntries(entryProtos).build();
+    return reportProto;
   }
 
   public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
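[Editor's note] PBHelper keeps every translation symmetric: for each new snapshot type there is a proto-to-Java overload and a Java-to-proto overload of convert, each tolerating null input and silently dropping entries whose modification label it cannot map back to a DiffType. The hedged round-trip sketch below exercises that pair for SnapshotDiffReport; the DiffType.CREATE constant and the sample paths are assumptions for illustration and do not come from this patch.

import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class SnapshotDiffRoundTrip {
  public static void main(String[] args) {
    // Build a report as the server side would: snapshot root, from/to snapshot
    // names, and one entry per changed path (DiffType.CREATE is assumed).
    SnapshotDiffReport report = new SnapshotDiffReport(
        "/user/alice/data", "s1", "s2",
        Arrays.asList(new DiffReportEntry(
            DiffType.CREATE, "subdir/newfile".getBytes())));

    // Java -> protobuf, as done when the RPC response is produced ...
    SnapshotDiffReportProto proto = PBHelper.convert(report);
    // ... and protobuf -> Java, as done by the client-side translator above.
    SnapshotDiffReport decoded = PBHelper.convert(proto);

    System.out.println(decoded.getSnapshotRoot());     // /user/alice/data
    System.out.println(decoded.getDiffList().size());  // 1
  }
}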
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java Thu May  9 23:55:49 2013
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.security.token.delegation;
 
-import java.io.DataInputStream;
+import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -110,7 +110,7 @@ public class DelegationTokenSecretManage
    * @param in input stream to read fsimage
    * @throws IOException
    */
-  public synchronized void loadSecretManagerState(DataInputStream in)
+  public synchronized void loadSecretManagerState(DataInput in)
       throws IOException {
     if (running) {
       // a safety check
@@ -266,7 +266,7 @@ public class DelegationTokenSecretManage
   /**
    * Private helper methods to load Delegation tokens from fsimage
    */
-  private synchronized void loadCurrentTokens(DataInputStream in)
+  private synchronized void loadCurrentTokens(DataInput in)
      throws IOException {
     int numberOfTokens = in.readInt();
     for (int i = 0; i < numberOfTokens; i++) {
@@ -282,7 +282,7 @@ public class DelegationTokenSecretManage
    * @param in
    * @throws IOException
    */
-  private synchronized void loadAllKeys(DataInputStream in) throws IOException {
+  private synchronized void loadAllKeys(DataInput in) throws IOException {
     int numberOfKeys = in.readInt();
     for (int i = 0; i < numberOfKeys; i++) {
       DelegationKey value = new DelegationKey();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java Thu May  9 23:55:49 2013
@@ -217,7 +217,7 @@ public class BackupImage extends FSImage
       }
       lastAppliedTxId = logLoader.getLastAppliedTxId();
 
-      namesystem.dir.updateCountForINodeWithQuota(); // inefficient!
+      FSImage.updateCountForQuota(namesystem.dir.rootDir); // inefficient!
     } finally {
       backupInputStream.clear();
     }
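[Editor's note] Returning to the DelegationTokenSecretManager change above: widening loadSecretManagerState, loadCurrentTokens, and loadAllKeys from DataInputStream to DataInput lets the secret manager read its state from any DataInput implementation, not only a stream. The hypothetical snippet below shows why that matters for callers; readKeyCount, the file name, and the sample data are illustrative only and not part of this commit.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

public class DataInputSketch {
  // Hypothetical loader with the widened signature: it only needs readInt(),
  // so any DataInput works -- a DataInputStream, a RandomAccessFile, etc.
  static int readKeyCount(DataInput in) throws IOException {
    return in.readInt();
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    new DataOutputStream(bytes).writeInt(3);

    // Works with the stream-based callers that existed before the change ...
    DataInput fromStream =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(readKeyCount(fromStream)); // 3

    // ... and with a non-stream DataInput such as RandomAccessFile, which a
    // DataInputStream-only signature could not accept (writes keys.bin in cwd).
    try (RandomAccessFile raf = new RandomAccessFile("keys.bin", "rw")) {
      raf.writeInt(3);
      raf.seek(0);
      System.out.println(readKeyCount(raf)); // 3
    }
  }
}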