From: wheat9@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1582433 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/
Date: Thu, 27 Mar 2014 18:15:47 -0000
Message-Id: <20140327181548.4127A238883D@eris.apache.org>

Author: wheat9
Date: Thu Mar 27 18:15:47 2014
New Revision: 1582433

URL: http://svn.apache.org/r1582433
Log:
HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
Contributed by Akira Ajisaka.

Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1582433&r1=1582432&r2=1582433&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Mar 27 18:15:47 2014
@@ -266,7 +266,10 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6119. FSNamesystem code cleanup. (suresh)
 
-    HDFS-6158. Clean up dead code for OfflineImageViewer (wheat9)
+    HDFS-6158. Clean up dead code for OfflineImageViewer. (wheat9)
+
+    HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
+    (Akira Ajisaka via wheat9)
 
   OPTIMIZATIONS
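
For reference, the Web processor added by this commit can be started either through the oiv command line (see the OfflineImageViewerPB change below) or directly from Java via the new WebImageViewer class. A minimal sketch, assuming an fsimage file at ./fsimage; the class name StartWebImageViewer, the path, and the address are illustrative, not part of this commit:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hdfs.tools.offlineImageViewer.WebImageViewer;
    import org.apache.hadoop.net.NetUtils;

    public class StartWebImageViewer {
      public static void main(String[] args) throws Exception {
        // Same default address as the new -addr option (localhost:5978).
        InetSocketAddress addr = NetUtils.createSocketAddr("localhost:5978");
        WebImageViewer viewer = new WebImageViewer(addr);
        // Loads the fsimage and blocks until interrupted (Ctrl+C).
        viewer.initServerAndWait("./fsimage");
      }
    }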
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1582433&r1=1582432&r2=1582433&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Mar 27 18:15:47 2014
@@ -179,7 +179,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty</artifactId>
-      <scope>test</scope>
+      <scope>compile</scope>
     </dependency>

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java?rev=1582433&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java Thu Mar 27 18:15:47 2014
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
+import org.jboss.netty.handler.codec.http.HttpHeaders;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+import org.jboss.netty.handler.codec.http.HttpRequest;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+import org.jboss.netty.handler.codec.http.HttpVersion;
+import org.jboss.netty.handler.codec.http.QueryStringDecoder;
+
+/**
+ * Implements the read-only WebHDFS API for fsimage.
+ */
+public class FSImageHandler extends SimpleChannelUpstreamHandler {
+  public static final Log LOG = LogFactory.getLog(FSImageHandler.class);
+  private final FSImageLoader loader;
+
+  public FSImageHandler(FSImageLoader loader) throws IOException {
+    this.loader = loader;
+  }
+
+  @Override
+  public void messageReceived(
+      ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+    HttpRequest request = (HttpRequest) e.getMessage();
+    if (request.getMethod() == HttpMethod.GET){
+      String uri = request.getUri();
+      QueryStringDecoder decoder = new QueryStringDecoder(uri);
+
+      String op = "null";
+      if (decoder.getParameters().containsKey("op")) {
+        op = decoder.getParameters().get("op").get(0).toUpperCase();
+      }
+      HttpResponse response = new DefaultHttpResponse(
+          HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+      String json = null;
+
+      if (op.equals("LISTSTATUS")) {
+        try {
+          json = loader.listStatus(decoder.getPath());
+          response.setStatus(HttpResponseStatus.OK);
+          response.setHeader(HttpHeaders.Names.CONTENT_TYPE,
+              "application/json");
+          HttpHeaders.setContentLength(response, json.length());
+        } catch (Exception ex) {
+          LOG.warn(ex.getMessage());
+          response.setStatus(HttpResponseStatus.NOT_FOUND);
+        }
+      } else {
+        response.setStatus(HttpResponseStatus.BAD_REQUEST);
+      }
+
+      e.getChannel().write(response);
+      if (json != null) {
+        e.getChannel().write(json);
+      }
+      LOG.info(response.getStatus().getCode() + " method=GET op=" + op
+          + " target=" + decoder.getPath());
+    } else {
+      // only HTTP GET is allowed since fsimage is read-only.
+      HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
+          HttpResponseStatus.METHOD_NOT_ALLOWED);
+      e.getChannel().write(response);
+      LOG.info(response.getStatus().getCode() + " method="
+          + request.getMethod().getName());
+    }
+    e.getFuture().addListener(ChannelFutureListener.CLOSE);
+  }
+}
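
Once the viewer is listening, the handler above can be exercised with a plain HTTP GET. A minimal client sketch, assuming the default address localhost:5978; on success the body is the WebHDFS-style {"FileStatuses":{"FileStatus":[...]}} document that FSImageLoader.listStatus() below builds:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ListStatusClient {
      public static void main(String[] args) throws Exception {
        // LISTSTATUS against the root of the loaded fsimage namespace.
        URL url = new URL("http://localhost:5978/?op=LISTSTATUS");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // Expect 200 with Content-Type application/json; the handler returns
        // 400 for unknown ops and 405 for anything other than GET.
        System.out.println("HTTP " + conn.getResponseCode());
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);
          }
        }
      }
    }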
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java?rev=1582433&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java Thu Mar 27 18:15:47 2014
@@ -0,0 +1,369 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.BufferedInputStream;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
+import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.io.IOUtils;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.io.LimitInputStream;
+
+/**
+ * FSImageLoader loads fsimage and provides methods to return JSON formatted
+ * file status of the namespace of the fsimage.
+ */
+public class FSImageLoader {
+  public static final Log LOG = LogFactory.getLog(FSImageHandler.class);
+
+  private static String[] stringTable;
+  private static Map<Long, FsImageProto.INodeSection.INode> inodes =
+      Maps.newHashMap();
+  private static Map<Long, long[]> dirmap = Maps.newHashMap();
+  private static List<FsImageProto.INodeReferenceSection.INodeReference>
+      refList = Lists.newArrayList();
+
+  private FSImageLoader() {}
+
+  /**
+   * Load fsimage into the memory.
+   * @param inputFile the filepath of the fsimage to load.
+   * @return FSImageLoader
+   * @throws IOException if failed to load fsimage.
+   */
+  public static FSImageLoader load(String inputFile) throws IOException {
+    Configuration conf = new Configuration();
+    RandomAccessFile file = new RandomAccessFile(inputFile, "r");
+    if (!FSImageUtil.checkFileFormat(file)) {
+      throw new IOException("Unrecognized FSImage");
+    }
+
+    FsImageProto.FileSummary summary = FSImageUtil.loadSummary(file);
+    FileInputStream fin = null;
+    try {
+      fin = new FileInputStream(file.getFD());
+
+      ArrayList<FsImageProto.FileSummary.Section> sections =
+          Lists.newArrayList(summary.getSectionsList());
+      Collections.sort(sections,
+          new Comparator<FsImageProto.FileSummary.Section>() {
+            @Override
+            public int compare(FsImageProto.FileSummary.Section s1,
+                               FsImageProto.FileSummary.Section s2) {
+              FSImageFormatProtobuf.SectionName n1 =
+                  FSImageFormatProtobuf.SectionName.fromString(s1.getName());
+              FSImageFormatProtobuf.SectionName n2 =
+                  FSImageFormatProtobuf.SectionName.fromString(s2.getName());
+              if (n1 == null) {
+                return n2 == null ? 0 : -1;
+              } else if (n2 == null) {
+                return -1;
+              } else {
+                return n1.ordinal() - n2.ordinal();
+              }
+            }
+          });
+
+      for (FsImageProto.FileSummary.Section s : sections) {
+        fin.getChannel().position(s.getOffset());
+        InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
+            summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+                fin, s.getLength())));
+
+        switch (FSImageFormatProtobuf.SectionName.fromString(s.getName())) {
+          case STRING_TABLE:
+            loadStringTable(is);
+            break;
+          case INODE:
+            loadINodeSection(is);
+            break;
+          case INODE_REFERENCE:
+            loadINodeReferenceSection(is);
+            break;
+          case INODE_DIR:
+            loadINodeDirectorySection(is);
+            break;
+          default:
+            break;
+        }
+      }
+    } finally {
+      IOUtils.cleanup(null, fin);
+    }
+    return new FSImageLoader();
+  }
+
+  private static void loadINodeDirectorySection(InputStream in)
+      throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading directory section");
+    }
+    while (true) {
+      FsImageProto.INodeDirectorySection.DirEntry e =
+          FsImageProto.INodeDirectorySection.DirEntry.parseDelimitedFrom(in);
+      // note that in is a LimitedInputStream
+      if (e == null) {
+        break;
+      }
+      long[] l = new long[e.getChildrenCount() + e.getRefChildrenCount()];
+      for (int i = 0; i < e.getChildrenCount(); ++i) {
+        l[i] = e.getChildren(i);
+      }
+      for (int i = e.getChildrenCount(); i < l.length; i++) {
+        int refId = e.getRefChildren(i - e.getChildrenCount());
+        l[i] = refList.get(refId).getReferredId();
+      }
+      dirmap.put(e.getParent(), l);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Loaded directory (parent " + e.getParent()
+            + ") with " + e.getChildrenCount() + " children and "
+            + e.getRefChildrenCount() + " reference children");
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loaded " + dirmap.size() + " directories");
+    }
+  }
+
+  private static void loadINodeReferenceSection(InputStream in)
+      throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading inode reference section");
+    }
+    while (true) {
+      FsImageProto.INodeReferenceSection.INodeReference e =
+          FsImageProto.INodeReferenceSection.INodeReference
+              .parseDelimitedFrom(in);
+      if (e == null) {
+        break;
+      }
+      refList.add(e);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded inode reference named '" + e.getName()
+            + "' referring to id " + e.getReferredId() + "");
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loaded " + refList.size() + " inode references");
+    }
+  }
+
+  private static void loadINodeSection(InputStream in) throws IOException {
+    FsImageProto.INodeSection s = FsImageProto.INodeSection
+        .parseDelimitedFrom(in);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Found " + s.getNumInodes() + " inodes in inode section");
+    }
+    for (int i = 0; i < s.getNumInodes(); ++i) {
+      FsImageProto.INodeSection.INode p = FsImageProto.INodeSection.INode
+          .parseDelimitedFrom(in);
+      inodes.put(p.getId(), p);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded inode id " + p.getId() + " type " + p.getType()
+            + " name '" + p.getName().toStringUtf8() + "'");
+      }
+    }
+  }
+
+  private static void loadStringTable(InputStream in) throws IOException {
+    FsImageProto.StringTableSection s = FsImageProto.StringTableSection
+        .parseDelimitedFrom(in);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Found " + s.getNumEntry() + " strings in string section");
+    }
+    stringTable = new String[s.getNumEntry() + 1];
+    for (int i = 0; i < s.getNumEntry(); ++i) {
+      FsImageProto.StringTableSection.Entry e = FsImageProto
+          .StringTableSection.Entry.parseDelimitedFrom(in);
+      stringTable[e.getId()] = e.getStr();
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded string " + e.getStr());
+      }
+    }
+  }
+
+  /**
+   * Return the JSON formatted list of the files in the specified directory.
+   * @param path a path that specifies a directory to list
+   * @return JSON formatted file list in the directory
+   * @throws IOException if failed to serialize fileStatus to JSON.
+   */
+  public String listStatus(String path) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    ObjectMapper mapper = new ObjectMapper();
+    List<Map<String, Object>> fileStatusList = getFileStatusList(path);
+    sb.append("{\"FileStatuses\":{\"FileStatus\":[\n");
+    int i = 0;
+    for (Map<String, Object> fileStatusMap : fileStatusList) {
+      if (i++ != 0) {
+        sb.append(',');
+      }
+      sb.append(mapper.writeValueAsString(fileStatusMap));
+    }
+    sb.append("\n]}}\n");
+    return sb.toString();
+  }
+
+  private List<Map<String, Object>> getFileStatusList(String path) {
+    List<Map<String, Object>> list = new ArrayList<Map<String, Object>>();
+    long id = getINodeId(path);
+    FsImageProto.INodeSection.INode inode = inodes.get(id);
+    if (inode.getType() == FsImageProto.INodeSection.INode.Type.DIRECTORY) {
+      long[] children = dirmap.get(id);
+      for (long cid : children) {
+        list.add(getFileStatus(inodes.get(cid), true));
+      }
+    } else {
+      list.add(getFileStatus(inode, false));
+    }
+    return list;
+  }
+
+  /**
+   * Return the INodeId of the specified path.
+   */
+  private long getINodeId(String strPath) {
+    if (strPath.equals("/")) {
+      return INodeId.ROOT_INODE_ID;
+    }
+
+    String[] nameList = strPath.split("/");
+    Preconditions.checkArgument(nameList.length > 1,
+        "Illegal path: " + strPath);
+    long id = INodeId.ROOT_INODE_ID;
+    for (int i = 1; i < nameList.length; i++) {
+      long[] children = dirmap.get(id);
+      Preconditions.checkNotNull(children, "The specified path: "
+          + strPath + " is not found in the fsimage.");
+      String cName = nameList[i];
+      boolean findChildren = false;
+      for (long cid : children) {
+        if (cName.equals(inodes.get(cid).getName().toStringUtf8())) {
+          id = cid;
+          findChildren = true;
+          break;
+        }
+      }
+      Preconditions.checkArgument(findChildren, "The specified path: "
+          + strPath + " is not found in the fsimage.");
+    }
+    return id;
+  }
+
+  private Map<String, Object> getFileStatus
+      (FsImageProto.INodeSection.INode inode, boolean printSuffix){
+    Map<String, Object> map = Maps.newHashMap();
+    switch (inode.getType()) {
+      case FILE: {
+        FsImageProto.INodeSection.INodeFile f = inode.getFile();
+        PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
+            f.getPermission(), stringTable);
+        map.put("accessTime", f.getAccessTime());
+        map.put("blockSize", f.getPreferredBlockSize());
+        map.put("group", p.getGroupName());
+        map.put("length", getFileSize(f));
+        map.put("modificationTime", f.getModificationTime());
+        map.put("owner", p.getUserName());
+        map.put("pathSuffix",
+            printSuffix ? inode.getName().toStringUtf8() : "");
+        map.put("permission", toString(p.getPermission()));
+        map.put("replication", f.getReplication());
+        map.put("type", inode.getType());
+        map.put("fileId", inode.getId());
+        map.put("childrenNum", 0);
+        return map;
+      }
+      case DIRECTORY: {
+        FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
+        PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
+            d.getPermission(), stringTable);
+        map.put("accessTime", 0);
+        map.put("blockSize", 0);
+        map.put("group", p.getGroupName());
+        map.put("length", 0);
+        map.put("modificationTime", d.getModificationTime());
+        map.put("owner", p.getUserName());
+        map.put("pathSuffix",
+            printSuffix ? inode.getName().toStringUtf8() : "");
+        map.put("permission", toString(p.getPermission()));
+        map.put("replication", 0);
+        map.put("type", inode.getType());
+        map.put("fileId", inode.getId());
+        map.put("childrenNum", dirmap.get(inode.getId()).length);
+        return map;
+      }
+      case SYMLINK: {
+        FsImageProto.INodeSection.INodeSymlink d = inode.getSymlink();
+        PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
+            d.getPermission(), stringTable);
+        map.put("accessTime", d.getAccessTime());
+        map.put("blockSize", 0);
+        map.put("group", p.getGroupName());
+        map.put("length", 0);
+        map.put("modificationTime", d.getModificationTime());
+        map.put("owner", p.getUserName());
+        map.put("pathSuffix",
+            printSuffix ? inode.getName().toStringUtf8() : "");
+        map.put("permission", toString(p.getPermission()));
+        map.put("replication", 0);
+        map.put("type", inode.getType());
+        map.put("symlink", d.getTarget().toStringUtf8());
+        map.put("fileId", inode.getId());
+        map.put("childrenNum", 0);
+        return map;
+      }
+      default:
+        return null;
+    }
+  }
+
+  private long getFileSize(FsImageProto.INodeSection.INodeFile f) {
+    long size = 0;
+    for (HdfsProtos.BlockProto p : f.getBlocksList()) {
+      size += p.getNumBytes();
+    }
+    return size;
+  }
+
+  private String toString(FsPermission permission) {
+    return String.format("%o", permission.toShort());
+  }
+}
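
FSImageLoader can also be used on its own, without the HTTP front end. A minimal sketch, assuming an fsimage file at ./fsimage (path and class name are illustrative):

    import org.apache.hadoop.hdfs.tools.offlineImageViewer.FSImageLoader;

    public class ListRoot {
      public static void main(String[] args) throws Exception {
        // load() parses the STRING_TABLE, INODE, INODE_REFERENCE and
        // INODE_DIR sections into in-memory maps, then serves lookups
        // from them without touching a NameNode.
        FSImageLoader loader = FSImageLoader.load("./fsimage");
        // JSON-formatted listing of the image's root directory.
        System.out.println(loader.listStatus("/"));
      }
    }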
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java?rev=1582433&r1=1582432&r2=1582433&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java Thu Mar 27 18:15:47 2014
@@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 
 /**
  * OfflineImageViewerPB to dump the contents of an Hadoop image file to XML or
@@ -69,6 +70,8 @@ public class OfflineImageViewerPB {
       + "  -maxSize specifies the range [0, maxSize] of file sizes to be\n"
       + "     analyzed (128GB by default).\n"
       + "  -step defines the granularity of the distribution. (2MB by default)\n"
+      + "  * Web: Run a viewer to expose read-only WebHDFS API.\n"
+      + "    -addr specifies the address to listen. (localhost:5978 by default)\n"
       + "\n"
       + "Required command line arguments:\n"
       + "-i,--inputFile <arg>   FSImage file to process.\n"
@@ -103,6 +106,7 @@ public class OfflineImageViewerPB {
     options.addOption("h", "help", false, "");
     options.addOption("maxSize", true, "");
     options.addOption("step", true, "");
+    options.addOption("addr", true, "");
 
     return options;
   }
@@ -161,6 +165,10 @@ public class OfflineImageViewerPB {
       } else if (processor.equals("XML")) {
         new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
             "r"));
+      } else if (processor.equals("Web")) {
+        String addr = cmd.getOptionValue("addr", "localhost:5978");
+        new WebImageViewer(NetUtils.createSocketAddr(addr))
+            .initServerAndWait(inputFile);
       } else {
         new LsrPBImage(conf, out).visit(new RandomAccessFile(inputFile, "r"));
       }
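
For reference, assuming the usual wiring of OfflineImageViewerPB into the hdfs script, the new processor would be invoked as hdfs oiv -i <fsimage> -p Web -addr localhost:5978 (the fsimage path is a placeholder), after which the address answers the GET requests shown earlier.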
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java?rev=1582433&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java Thu Mar 27 18:15:47 2014
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.Executors;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelStateEvent;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.jboss.netty.channel.group.ChannelGroup;
+import org.jboss.netty.channel.group.DefaultChannelGroup;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
+import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
+import org.jboss.netty.handler.codec.string.StringEncoder;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * WebImageViewer loads an fsimage and exposes read-only WebHDFS API for its
+ * namespace.
+ */
+public class WebImageViewer {
+  public static final Log LOG = LogFactory.getLog(WebImageViewer.class);
+
+  private Channel channel;
+  private InetSocketAddress address;
+  private final ChannelFactory factory =
+      new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
+          Executors.newCachedThreadPool(), 1);
+  private final ServerBootstrap bootstrap = new ServerBootstrap(factory);
+
+  static final ChannelGroup allChannels =
+      new DefaultChannelGroup("WebImageViewer");
+
+  public WebImageViewer(InetSocketAddress address) {
+    this.address = address;
+  }
+
+  /**
+   * Start WebImageViewer and wait until the thread is interrupted.
+   * @param fsimage the fsimage to load.
+   * @throws IOException if it fails to load the fsimage.
+   */
+  public void initServerAndWait(String fsimage) throws IOException {
+    initServer(fsimage);
+    try {
+      channel.getCloseFuture().await();
+    } catch (InterruptedException e) {
+      LOG.info("Interrupted. Stopping the WebImageViewer.");
+      shutdown();
+    }
+  }
+
+  /**
+   * Start WebImageViewer.
+   * @param fsimage the fsimage to load.
+   * @throws IOException if it fails to load the fsimage.
+   */
+  @VisibleForTesting
+  public void initServer(String fsimage) throws IOException {
+    FSImageLoader loader = FSImageLoader.load(fsimage);
+
+    ChannelPipeline pipeline = Channels.pipeline();
+    pipeline.addLast("channelTracker", new SimpleChannelUpstreamHandler() {
+      @Override
+      public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e)
+          throws Exception {
+        allChannels.add(e.getChannel());
+      }
+    });
+    pipeline.addLast("httpDecoder", new HttpRequestDecoder());
+    pipeline.addLast("requestHandler", new FSImageHandler(loader));
+    pipeline.addLast("stringEncoder", new StringEncoder());
+    pipeline.addLast("httpEncoder", new HttpResponseEncoder());
+    bootstrap.setPipeline(pipeline);
+    channel = bootstrap.bind(address);
+    allChannels.add(channel);
+
+    address = (InetSocketAddress) channel.getLocalAddress();
+    LOG.info("WebImageViewer started. Listening on " + address.toString()
+        + ". Press Ctrl+C to stop the viewer.");
+  }
+
+  /**
+   * Stop WebImageViewer.
+   */
+  @VisibleForTesting
+  public void shutdown() {
+    allChannels.close().awaitUninterruptibly();
+    factory.releaseExternalResources();
+  }
+
+  /**
+   * Get the listening port.
+   * @return the port WebImageViewer is listening on
+   */
+  @VisibleForTesting
+  public int getPort() {
+    return address.getPort();
+  }
+}
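
Because initServer() rebinds address to the channel's actual local address, a caller can bind to port 0 and discover the chosen port afterwards, which is what the test below relies on. A minimal sketch; the fsimage path is illustrative:

    import org.apache.hadoop.hdfs.tools.offlineImageViewer.WebImageViewer;
    import org.apache.hadoop.net.NetUtils;

    public class EphemeralPortExample {
      public static void main(String[] args) throws Exception {
        // Port 0 asks the OS for any free port.
        WebImageViewer viewer = new WebImageViewer(
            NetUtils.createSocketAddr("localhost:0"));
        viewer.initServer("./fsimage");
        try {
          // getPort() reports the port that was actually bound.
          System.out.println("listening on port " + viewer.getPort());
        } finally {
          viewer.shutdown();
        }
      }
    }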
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1582433&r1=1582432&r2=1582433&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Thu Mar 27 18:15:47 2014
@@ -28,9 +28,13 @@ import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.StringReader;
 import java.io.StringWriter;
+import java.net.HttpURLConnection;
+import java.net.URL;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -52,8 +56,12 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -289,4 +297,66 @@ public class TestOfflineImageViewer {
     final String xml = output.getBuffer().toString();
     parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
   }
+
+  @Test
+  public void testWebImageViewer() throws IOException, InterruptedException {
+    WebImageViewer viewer = new WebImageViewer(
+        NetUtils.createSocketAddr("localhost:0"));
+    try {
+      viewer.initServer(originalFsimage.getAbsolutePath());
+      int port = viewer.getPort();
+
+      // 1. LISTSTATUS operation to a valid path
+      URL url = new URL("http://localhost:" + port + "/?op=LISTSTATUS");
+      HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+      connection.setRequestMethod("GET");
+      connection.connect();
+      assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
+      assertEquals("application/json", connection.getContentType());
+
+      String content = org.apache.commons.io.IOUtils.toString(
+          connection.getInputStream());
+      LOG.info("content: " + content);
+
+      // verify the number of directories listed
+      ObjectMapper mapper = new ObjectMapper();
+      Map<String, Map<String, List<Map<String, Object>>>> fileStatuses =
+          mapper.readValue(content, new TypeReference
+              <Map<String, Map<String, List<Map<String, Object>>>>>(){});
+      List<Map<String, Object>> fileStatusList = fileStatuses
+          .get("FileStatuses").get("FileStatus");
+      assertEquals(NUM_DIRS, fileStatusList.size());
+
+      // verify the number of files in a directory
+      Map<String, Object> fileStatusMap = fileStatusList.get(0);
+      assertEquals(FILES_PER_DIR, fileStatusMap.get("childrenNum"));
+
+      // 2. LISTSTATUS operation to an invalid path
+      url = new URL("http://localhost:" + port + "/invalid/?op=LISTSTATUS");
+      connection = (HttpURLConnection) url.openConnection();
+      connection.setRequestMethod("GET");
+      connection.connect();
+      assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
+          connection.getResponseCode());
+
+      // 3. invalid operation
+      url = new URL("http://localhost:" + port + "/?op=INVALID");
+      connection = (HttpURLConnection) url.openConnection();
+      connection.setRequestMethod("GET");
+      connection.connect();
+      assertEquals(HttpURLConnection.HTTP_BAD_REQUEST,
+          connection.getResponseCode());
+
+      // 4. invalid method
+      url = new URL("http://localhost:" + port + "/?op=LISTSTATUS");
+      connection = (HttpURLConnection) url.openConnection();
+      connection.setRequestMethod("POST");
+      connection.connect();
+      assertEquals(HttpURLConnection.HTTP_BAD_METHOD,
+          connection.getResponseCode());
+    } finally {
+      // shutdown the viewer
+      viewer.shutdown();
+    }
+  }
 }