hadoop-hdfs-commits mailing list archives

From s..@apache.org
Subject svn commit: r817453 [3/3] - in /hadoop/hdfs/branches/HDFS-265: ./ .eclipse.templates/.launches/ src/contrib/block_forensics/ src/contrib/block_forensics/client/ src/contrib/block_forensics/ivy/ src/contrib/block_forensics/src/java/org/apache/hadoop/blo...
Date Mon, 21 Sep 2009 23:07:45 GMT
Modified: hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml Mon Sep 21 23:07:44 2009
@@ -24,7 +24,7 @@
 
   <header>
     <title>
-      HDFS User Guide
+      HDFS Users Guide
     </title>
   </header>
 
@@ -32,9 +32,8 @@
     <section> <title>Purpose</title>
       <p>
  This document is a starting point for users working with
- Hadoop Distributed File System (HDFS) either as a part of a
- <a href="http://hadoop.apache.org/">Hadoop</a>
- cluster or as a stand-alone general purpose distributed file system.
+ Hadoop Distributed File System (HDFS) either as a part of a Hadoop cluster  
+ or as a stand-alone general purpose distributed file system.
  While HDFS is designed to "just work" in many environments, a working
  knowledge of HDFS helps greatly with configuration improvements and
  diagnostics on a specific cluster.
@@ -46,7 +45,7 @@
  HDFS is the primary distributed storage used by Hadoop applications. A
  HDFS cluster primarily consists of a NameNode that manages the
  file system metadata and DataNodes that store the actual data. The
- <a href="hdfs_design.html">HDFS Architecture</a> describes HDFS in detail. This user guide primarily deals with 
+ <a href="hdfs_design.html">HDFS Architecture Guide</a> describes HDFS in detail. This user guide primarily deals with 
  the interaction of users and administrators with HDFS clusters. 
 The <a href="images/hdfsarchitecture.gif">HDFS architecture diagram</a> depicts 
  basic interactions among NameNode, the DataNodes, and the clients. 
@@ -61,8 +60,7 @@
     <li>
     	Hadoop, including HDFS, is well suited for distributed storage
     	and distributed processing using commodity hardware. It is fault
-    	tolerant, scalable, and extremely simple to expand.
-    	<a href="mapred_tutorial.html">Map/Reduce</a>,
+    	tolerant, scalable, and extremely simple to expand. MapReduce, 
     	well known for its simplicity and applicability for large set of
     	distributed applications, is an integral part of Hadoop.
     </li>
@@ -134,18 +132,17 @@
     </li>
     </ul>
     
-    </section> <section> <title> Pre-requisites </title>
+    </section> <section> <title> Prerequisites </title>
     <p>
- 	The following documents describe installation and set up of a
- 	Hadoop cluster : 
+ 	The following documents describe how to install and set up a Hadoop cluster: 
     </p>
  	<ul>
  	<li>
- 		<a href="quickstart.html">Hadoop Quick Start</a>
+ 		<a href="http://hadoop.apache.org/common/docs/current/single_node_setup.html">Single Node Setup</a>
  		for first-time users.
  	</li>
  	<li>
- 		<a href="cluster_setup.html">Hadoop Cluster Setup</a>
+ 		<a href="http://hadoop.apache.org/common/docs/current/cluster_setup.html">Cluster Setup</a>
  		for large, distributed clusters.
  	</li>
     </ul>
@@ -173,14 +170,15 @@
       Hadoop includes various shell-like commands that directly
       interact with HDFS and other file systems that Hadoop supports.
       The command
-      <code>bin/hadoop fs -help</code>
+      <code>bin/hdfs dfs -help</code>
       lists the commands supported by Hadoop
       shell. Furthermore, the command
-      <code>bin/hadoop fs -help command-name</code>
+      <code>bin/hdfs dfs -help command-name</code>
       displays more detailed help for a command. These commands support
-      most of the normal files ystem operations like copying files,
+      most of the normal file system operations like copying files,
       changing file permissions, etc. It also supports a few HDFS
-      specific operations like changing replication of files.
+      specific operations like changing replication of files. 
+      For more information see <a href="http://hadoop.apache.org/common/docs/current/file_system_shell.html">File System Shell Guide</a>.
      </p>
 
    <section> <title> DFSAdmin Command </title>
@@ -223,17 +221,19 @@
     </li>
    	</ul>
    	<p>
-   	  For command usage, see <a href="commands_manual.html#dfsadmin">dfsadmin command</a>.
+   	  For command usage, see  
+   	  <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#dfsadmin">dfsadmin</a>.
    	</p>  
    </section>
    
    </section> 
 	<section> <title>Secondary NameNode</title>
-   <p>
-     The Secondary NameNode has been deprecated; considering using the 
-   <a href="hdfs_user_guide.html#Checkpoint+node">Checkpoint node</a> or 
-   <a href="hdfs_user_guide.html#Backup+node">Backup node</a> instead.
-   </p>
+   <note>
+   The Secondary NameNode has been deprecated. 
+   Instead, consider using the 
+   <a href="hdfs_user_guide.html#Checkpoint+node">Checkpoint Node</a> or 
+   <a href="hdfs_user_guide.html#Backup+node">Backup Node</a>.
+   </note>
    <p>	
      The NameNode stores modifications to the file system as a log
      appended to a native file system file, <code>edits</code>. 
@@ -277,10 +277,11 @@
      read by the primary NameNode if necessary.
    </p>
    <p>
-     For command usage, see <a href="commands_manual.html#secondarynamenode"><code>secondarynamenode</code> command</a>.
+     For command usage, see  
+     <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#secondarynamenode">secondarynamenode</a>.
    </p>
    
-   </section><section> <title> Checkpoint node </title>
+   </section><section> <title> Checkpoint Node </title>
    <p>NameNode persists its namespace using two files: <code>fsimage</code>,
       which is the latest checkpoint of the namespace and <code>edits</code>,
       a journal (log) of changes to the namespace since the checkpoint.
@@ -329,17 +330,17 @@
    </p>
    <p>Multiple checkpoint nodes may be specified in the cluster configuration file.</p>
    <p>
-     For command usage, see
-     <a href="commands_manual.html#namenode"><code>namenode</code> command</a>.
+     For command usage, see  
+     <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#namenode">namenode</a>.
    </p>
    </section>
 
-   <section> <title> Backup node </title>
+   <section> <title> Backup Node </title>
    <p>	
     The Backup node provides the same checkpointing functionality as the 
     Checkpoint node, as well as maintaining an in-memory, up-to-date copy of the
     file system namespace that is always synchronized with the active NameNode state.
-    Along with accepting a journal stream of filesystem edits from 
+    Along with accepting a journal stream of file system edits from 
     the NameNode and persisting this to disk, the Backup node also applies 
     those edits into its own copy of the namespace in memory, thus creating 
     a backup of the namespace.
@@ -384,12 +385,12 @@
     For a complete discussion of the motivation behind the creation of the 
     Backup node and Checkpoint node, see 
     <a href="https://issues.apache.org/jira/browse/HADOOP-4539">HADOOP-4539</a>.
-    For command usage, see 
-    <a href="commands_manual.html#namenode"><code>namenode</code> command</a>.
+    For command usage, see  
+     <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#namenode">namenode</a>.
    </p>
    </section>
 
-   <section> <title> Import checkpoint </title>
+   <section> <title> Import Checkpoint </title>
    <p>
      The latest checkpoint can be imported to the NameNode if
      all other copies of the image and the edits files are lost.
@@ -418,8 +419,8 @@
      consistent, but does not modify it in any way.
    </p>
    <p>
-     For command usage, see
-     <a href="commands_manual.html#namenode"><code>namenode</code> command</a>.
+     For command usage, see  
+      <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#namenode">namenode</a>.
    </p>
    </section>
 
@@ -461,7 +462,8 @@
       <a href="http://issues.apache.org/jira/browse/HADOOP-1652">HADOOP-1652</a>.
     </p>
     <p>
-     For command usage, see <a href="commands_manual.html#balancer">balancer command</a>.
+     For command usage, see  
+     <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#balancer">balancer</a>.
    </p>
     
    </section> <section> <title> Rack Awareness </title>
@@ -512,7 +514,8 @@
      <code>fsck</code> ignores open files but provides an option to select all files during reporting.
       The HDFS <code>fsck</code> command is not a
       Hadoop shell command. It can be run as '<code>bin/hadoop fsck</code>'.
-      For command usage, see <a href="commands_manual.html#fsck"><code>fsck</code> command</a>. 
+      For command usage, see  
+      <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html#fsck">fsck</a>.
      <code>fsck</code> can be run on the whole file system or on a subset of files.
      </p>
      
@@ -527,7 +530,7 @@
       of Hadoop and rollback the cluster to the state it was in 
       before
       the upgrade. HDFS upgrade is described in more detail in 
-      <a href="http://wiki.apache.org/hadoop/Hadoop%20Upgrade">upgrade wiki</a>.
+      <a href="http://wiki.apache.org/hadoop/Hadoop%20Upgrade">Hadoop Upgrade</a>
Wiki page.
       HDFS can have one such backup at a time. Before upgrading,
       administrators need to remove existing backup using <code>bin/hadoop
       dfsadmin -finalizeUpgrade</code> command. The following
@@ -571,13 +574,13 @@
       treated as the superuser for HDFS. Future versions of HDFS will
       support network authentication protocols like Kerberos for user
       authentication and encryption of data transfers. The details are discussed in the 
-      <a href="hdfs_permissions_guide.html">HDFS Admin Guide: Permissions</a>.
+      <a href="hdfs_permissions_guide.html">Permissions Guide</a>.
      </p>
      
    </section> <section> <title> Scalability </title>
      <p>
-      Hadoop currently runs on clusters with thousands of nodes.
-      <a href="http://wiki.apache.org/hadoop/PoweredBy">Powered By Hadoop</a>
+      Hadoop currently runs on clusters with thousands of nodes. The  
+      <a href="http://wiki.apache.org/hadoop/PoweredBy">PoweredBy</a> Wiki page

       lists some of the organizations that deploy Hadoop on large
       clusters. HDFS has one NameNode for each cluster. Currently
       the total memory available on NameNode is the primary scalability
@@ -585,8 +588,8 @@
       files stored in HDFS helps with increasing cluster size without
       increasing memory requirements on NameNode.
    
-      The default configuration may not suite very large clustes.
-      <a href="http://wiki.apache.org/hadoop/FAQ">Hadoop FAQ</a> page lists
+      The default configuration may not suit very large clusters. The 
+      <a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a> Wiki page lists
       suggested configuration improvements for large Hadoop clusters.
      </p>
      
@@ -599,15 +602,16 @@
       </p>
       <ul>
       <li>
-        <a href="http://hadoop.apache.org/">Hadoop Home Page</a>: The start page
for everything Hadoop.
+        <a href="http://hadoop.apache.org/">Hadoop Site</a>: The home page for
the Apache Hadoop site.
       </li>
       <li>
-        <a href="http://wiki.apache.org/hadoop/FrontPage">Hadoop Wiki</a>
-        : Front page for Hadoop Wiki documentation. Unlike this
-        guide which is part of Hadoop source tree, Hadoop Wiki is
+        <a href="http://wiki.apache.org/hadoop/FrontPage">Hadoop Wiki</a>:
+        The home page (FrontPage) for the Hadoop Wiki. Unlike the released documentation,
+        which is part of the Hadoop source tree, the Hadoop Wiki is
         regularly edited by Hadoop Community.
       </li>
-      <li> <a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a> from Hadoop Wiki.
+      <li> <a href="http://wiki.apache.org/hadoop/FAQ">FAQ</a>: 
+      The FAQ Wiki page.
       </li>
       <li>
         Hadoop <a href="http://hadoop.apache.org/core/docs/current/api/">
@@ -623,7 +627,7 @@
          description of most of the configuration variables available.
       </li>
       <li>
-        <a href="commands_manual.html">Hadoop Command Guide</a>: commands usage.
+        <a href="http://hadoop.apache.org/common/docs/current/commands_manual.html">Hadoop
Commands Guide</a>: Hadoop commands usage.
       </li>
       </ul>
      </section>
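
For readers following the user guide changes above: the shell operations the guide describes are also available programmatically. Below is a minimal Java sketch against the public FileSystem API, assuming a reachable HDFS configured through hdfs-site.xml on the classpath; the path, class name, and replication factor are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetReplicationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // reads hdfs-site.xml from the classpath
    FileSystem fs = FileSystem.get(conf);       // the configured default file system
    Path file = new Path("/tmp/example.txt");   // hypothetical path
    // The HDFS-specific operation the guide mentions: change a file's replication,
    // roughly what `bin/hdfs dfs -setrep 2 /tmp/example.txt` does from the shell.
    boolean changed = fs.setReplication(file, (short) 2);
    System.out.println("replication changed: " + changed);
    fs.close();
  }
}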

Modified: hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/index.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/index.xml?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/index.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/index.xml Mon Sep 21 23:07:44 2009
@@ -19,19 +19,28 @@
 <!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
 
 <document>
-  
   <header>
     <title>Overview</title>
   </header>
-  
   <body>
+  
   <p>
-  The Hadoop Documentation provides the information you need to get started using Hadoop, the Hadoop Distributed File System (HDFS), and Hadoop on Demand (HOD).
-  </p><p>
-Begin with the <a href="quickstart.html">Hadoop Quick Start</a> which shows you how to set up a single-node Hadoop installation. Then move on to the <a href="cluster_setup.html">Hadoop Cluster Setup</a> to learn how to set up a multi-node Hadoop installation. Once your Hadoop installation is in place, try out the <a href="mapred_tutorial.html">Hadoop Map/Reduce Tutorial</a>. 
-  </p><p>
-If you have more questions, you can ask on the <a href="ext:lists">Hadoop Core Mailing Lists</a> or browse the <a href="ext:archive">Mailing List Archives</a>.
-    </p>
-  </body>
+  The HDFS Documentation provides the information you need to get started using the Hadoop Distributed File System. 
+  Begin with the <a href="hdfs_user_guide.html">HDFS Users Guide</a> to obtain an overview of the system and then
+  move on to the <a href="hdfs_design.html">HDFS Architecture Guide</a> for more detailed information.
+  </p>
   
+  <p>
+   HDFS commonly works in tandem with a cluster environment and MapReduce applications. 
+   For information about Hadoop clusters (single or multi node) see the 
+ <a href="http://hadoop.apache.org/common/docs/current/index.html">Hadoop Common Documentation</a>.
+   For information about MapReduce see the 
+ <a href="http://hadoop.apache.org/mapreduce/docs/current/index.html">MapReduce Documentation</a>.
+  </p>   
+  
+<p>
+If you have more questions, you can ask on the <a href="ext:lists">HDFS Mailing Lists</a> or browse the <a href="ext:archive">Mailing List Archives</a>.
+</p>
+
+</body>
 </document>

Modified: hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/libhdfs.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/libhdfs.xml?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/libhdfs.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/libhdfs.xml Mon Sep 21 23:07:44 2009
@@ -1,97 +1,110 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
-          "http://forrest.apache.org/dtd/document-v20.dtd">
-
-
-<document>
-<header>
-<title>C API to HDFS: libhdfs</title>
-<meta name="http-equiv">Content-Type</meta>
-<meta name="content">text/html;</meta>
-<meta name="charset">utf-8</meta>
-</header>
-<body>
-<section>
-<title>C API to HDFS: libhdfs</title>
-
-<p>
-libhdfs is a JNI based C api for Hadoop's DFS. It provides C apis to a subset of the HDFS APIs to manipulate DFS files and the filesystem. libhdfs is part of the hadoop distribution and comes pre-compiled in ${HADOOP_HOME}/libhdfs/libhdfs.so .
-</p>
-
-</section>
-<section>
-<title>The APIs</title>
-
-<p>
-The libhdfs APIs are a subset of: <a href="api/org/apache/hadoop/fs/FileSystem.html" >hadoop fs APIs</a>.  
-</p>
-<p>
-The header file for libhdfs describes each API in detail and is available in ${HADOOP_HOME}/src/c++/libhdfs/hdfs.h
-</p>
-</section>
-<section>
-<title>A sample program</title>
-
-<source>
-#include "hdfs.h" 
-
-int main(int argc, char **argv) {
-
-    hdfsFS fs = hdfsConnect("default", 0);
-    const char* writePath = "/tmp/testfile.txt";
-    hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
-    if(!writeFile) {
-          fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-          exit(-1);
-    }
-    char* buffer = "Hello, World!";
-    tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
-    if (hdfsFlush(fs, writeFile)) {
-           fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
-          exit(-1);
-    }
-   hdfsCloseFile(fs, writeFile);
-}
-
-</source>
-</section>
-
-<section>
-<title>How to link with the library</title>
-<p>
-See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_HOME}/src/c++/libhdfs/Makefile) or something like:
-gcc above_sample.c -I${HADOOP_HOME}/src/c++/libhdfs -L${HADOOP_HOME}/libhdfs -lhdfs -o above_sample
-</p>
-</section>
-<section>
-<title>Common problems</title>
-<p>
-The most common problem is the CLASSPATH is not set properly when calling a program that uses libhdfs. Make sure you set it to all the hadoop jars needed to run Hadoop itself. Currently, there is no way to programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_HOME} and ${HADOOP_HOME}/lib as well as the right configuration directory containing hdfs-site.xml
-</p>
-</section>
-<section>
-<title>libhdfs is thread safe</title>
-<p>Concurrency and Hadoop FS "handles" - the hadoop FS implementation includes a FS handle cache which caches based on the URI of the namenode along with the user connecting. So, all calls to hdfsConnect will return the same handle but calls to hdfsConnectAsUser with different users will return different handles.  But, since HDFS client handles are completely thread safe, this has no bearing on concurrency. 
-</p>
-<p>Concurrency and libhdfs/JNI - the libhdfs calls to JNI should always be creating thread local storage, so (in theory), libhdfs should be as thread safe as the underlying calls to the Hadoop FS.
-</p>
-</section>
-</body>
-</document>
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
+          "http://forrest.apache.org/dtd/document-v20.dtd">
+
+<document>
+<header>
+<title>C API libhdfs</title>
+<meta name="http-equiv">Content-Type</meta>
+<meta name="content">text/html;</meta>
+<meta name="charset">utf-8</meta>
+</header>
+<body>
+<section>
+<title>Overview</title>
+
+<p>
+libhdfs is a JNI based C API for Hadoop's Distributed File System (HDFS).
+It provides C APIs to a subset of the HDFS APIs to manipulate HDFS files and
+the filesystem. libhdfs is part of the Hadoop distribution and comes 
+pre-compiled in ${HADOOP_HOME}/libhdfs/libhdfs.so .
+</p>
+
+</section>
+<section>
+<title>The APIs</title>
+
+<p>
+The libhdfs APIs are a subset of: <a href="api/org/apache/hadoop/fs/FileSystem.html" >hadoop fs APIs</a>.  
+</p>
+<p>
+The header file for libhdfs describes each API in detail and is available in ${HADOOP_HOME}/src/c++/libhdfs/hdfs.h
+</p>
+</section>
+<section>
+<title>A Sample Program</title>
+
+<source>
+#include "hdfs.h" 
+
+int main(int argc, char **argv) {
+
+    hdfsFS fs = hdfsConnect("default", 0);
+    const char* writePath = "/tmp/testfile.txt";
+    hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+    if(!writeFile) {
+          fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+          exit(-1);
+    }
+    char* buffer = "Hello, World!";
+    tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+    if (hdfsFlush(fs, writeFile)) {
+           fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+          exit(-1);
+    }
+   hdfsCloseFile(fs, writeFile);
+}
+</source>
+</section>
+
+<section>
+<title>How To Link With The Library</title>
+<p>
+See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_HOME}/src/c++/libhdfs/Makefile) or something like:<br />
+gcc above_sample.c -I${HADOOP_HOME}/src/c++/libhdfs -L${HADOOP_HOME}/libhdfs -lhdfs -o above_sample
+</p>
+</section>
+<section>
+<title>Common Problems</title>
+<p>
+The most common problem is the CLASSPATH is not set properly when calling a program that uses libhdfs. 
+Make sure you set it to all the Hadoop jars needed to run Hadoop itself. Currently, there is no way to 
+programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_HOME} 
+and ${HADOOP_HOME}/lib as well as the right configuration directory containing hdfs-site.xml
+</p>
+</section>
+<section>
+<title>Thread Safe</title>
+<p>libhdfs is thread safe.</p>
+<ul>
+<li>Concurrency and Hadoop FS "handles" 
+<br />The Hadoop FS implementation includes a FS handle cache which caches based on the URI of the 
+namenode along with the user connecting. So, all calls to hdfsConnect will return the same handle but 
+calls to hdfsConnectAsUser with different users will return different handles.  But, since HDFS client 
+handles are completely thread safe, this has no bearing on concurrency. 
+</li>
+<li>Concurrency and libhdfs/JNI 
+<br />The libhdfs calls to JNI should always be creating thread local storage, so (in theory), libhdfs 
+should be as thread safe as the underlying calls to the Hadoop FS.
+</li>
+</ul>
+</section>
+</body>
+</document>

Modified: hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/site.xml Mon Sep 21 23:07:44 2009
@@ -33,44 +33,20 @@
 <site label="Hadoop" href="" xmlns="http://apache.org/forrest/linkmap/1.0">
   
    <docs label="Getting Started"> 
-		<overview   				label="Overview" 					href="index.html" />
-		<quickstart 				label="Quick Start"        		href="quickstart.html" />
-		<setup     					label="Cluster Setup"      		href="cluster_setup.html" />
-		<mapred    				label="Map/Reduce Tutorial" 	href="mapred_tutorial.html" />
-  </docs>	
-		
- <docs label="Programming Guides">
-		<commands 				label="Commands"     					href="commands_manual.html" />
-		<distcp    					label="DistCp"       						href="distcp.html" />
-		<native_lib    				label="Native Libraries" 					href="native_libraries.html" />
-		<streaming 				label="Streaming"          				href="streaming.html" />
-		<fair_scheduler 			label="Fair Scheduler" 					href="fair_scheduler.html"/>
-        <hdfsproxy 			label="HDFS Proxy" 					href="hdfsproxy.html"/>
-		<cap_scheduler 		label="Capacity Scheduler" 			href="capacity_scheduler.html"/>
-		<SLA					 	label="Service Level Authorization" 	href="service_level_auth.html"/>
-		<vaidya    					label="Vaidya" 								href="vaidya.html"/>
-		<archives  				label="Archives"     						href="hadoop_archives.html"/>
+     <hdfsproxy 			label="HDFS Proxy" 					href="hdfsproxy.html"/>
+     <hdfs_user      				label="User Guide"    							href="hdfs_user_guide.html" />
+     <hdfs_arch     				label="Architecture"  								href="hdfs_design.html" />	
    </docs>
-   
-   <docs label="HDFS">
-		<hdfs_user      				label="User Guide"    							href="hdfs_user_guide.html" />
-		<hdfs_arch     				label="Architecture"  								href="hdfs_design.html" />	
-		<hdfs_fs       	 				label="File System Shell Guide"     		href="hdfs_shell.html" />
-		<hdfs_perm      				label="Permissions Guide"    					href="hdfs_permissions_guide.html" />
-		<hdfs_quotas     			label="Quotas Guide" 							href="hdfs_quota_admin_guide.html" />
-		<hdfs_SLG        			label="Synthetic Load Generator Guide"  href="SLG_user_guide.html" />
-		<hdfs_imageviewer						label="Offline Image Viewer Guide"	href="hdfs_imageviewer.html" />
-		<hdfs_libhdfs   				label="C API libhdfs"         						href="libhdfs.html" /> 
-                <docs label="Testing">
-                    <faultinject_framework              label="Fault Injection"                                                    href="faultinject_framework.html" />
-                </docs>
-   </docs> 
-   
-   <docs label="HOD">
-		<hod_user 	label="User Guide" 	href="hod_user_guide.html"/>
-		<hod_admin 	label="Admin Guide" 	href="hod_admin_guide.html"/>
-		<hod_config 	label="Config Guide" 	href="hod_config_guide.html"/> 
-   </docs> 
+   <docs label="Guides">
+      <hdfs_perm      				label="Permissions Guide"    					href="hdfs_permissions_guide.html" />
+      <hdfs_quotas     			label="Quotas Guide" 							href="hdfs_quota_admin_guide.html" />
+      <hdfs_SLG        			label="Synthetic Load Generator Guide"  href="SLG_user_guide.html" />
+      <hdfs_imageviewer						label="Offline Image Viewer Guide"	href="hdfs_imageviewer.html" />
+      <hdfs_libhdfs   				label="C API libhdfs"         						href="libhdfs.html" /> 
+    </docs>
+    <docs label="Testing">
+      <faultinject_framework              label="Fault Injection"                                                     href="faultinject_framework.html" />
+    </docs>
    
    <docs label="Miscellaneous"> 
 		<api       	label="API Docs"           href="ext:api/index" />
@@ -82,19 +58,20 @@
    </docs> 
    
   <external-refs>
-    <site      href="http://hadoop.apache.org/core/"/>
-    <lists     href="http://hadoop.apache.org/core/mailing_lists.html"/>
-    <archive   href="http://mail-archives.apache.org/mod_mbox/hadoop-core-commits/"/>
-    <releases  href="http://hadoop.apache.org/core/releases.html">
-      <download href="#Download" />
+    <site      href="http://hadoop.apache.org/hdfs/"/>
+    <lists     href="http://hadoop.apache.org/hdfs/mailing_lists.html"/>
+    <archive   href="http://mail-archives.apache.org/mod_mbox/hadoop-hdfs-commits/"/>
+    <releases  href="http://hadoop.apache.org/hdfs/releases.html">
+              <download href="#Download" />
     </releases>
-    <jira      href="http://hadoop.apache.org/core/issue_tracking.html"/>
-    <wiki      href="http://wiki.apache.org/hadoop/" />
-    <faq       href="http://wiki.apache.org/hadoop/FAQ" />
-    <hadoop-default href="http://hadoop.apache.org/core/docs/current/hadoop-default.html" />
-    <core-default href="http://hadoop.apache.org/core/docs/current/core-default.html" />
-    <hdfs-default href="http://hadoop.apache.org/core/docs/current/hdfs-default.html" />
-    <mapred-default href="http://hadoop.apache.org/core/docs/current/mapred-default.html" />
+    <jira      href="http://hadoop.apache.org/hdfs/issue_tracking.html"/>
+    <wiki      href="http://wiki.apache.org/hadoop/HDFS" />
+    <faq       href="http://wiki.apache.org/hadoop/HDFS/FAQ" />
+    
+    <common-default href="http://hadoop.apache.org/common/docs/current/common-default.html" />
+    <hdfs-default href="http://hadoop.apache.org/hdfs/docs/current/hdfs-default.html" />
+    <mapred-default href="http://hadoop.apache.org/mapreduce/docs/current/mapred-default.html" />
+    
     <zlib      href="http://www.zlib.net/" />
     <gzip      href="http://www.gzip.org/" />
     <bzip      href="http://www.bzip.org/" />

Modified: hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/tabs.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/tabs.xml?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/tabs.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/content/xdocs/tabs.xml Mon Sep 21 23:07:44 2009
@@ -30,8 +30,8 @@
     directory (ends in '/'), in which case /index.html will be added
   -->
 
-  <tab label="Project" href="http://hadoop.apache.org/core/" />
-  <tab label="Wiki" href="http://wiki.apache.org/hadoop" />
-  <tab label="Hadoop 0.21 Documentation" dir="" />  
+  <tab label="Project" href="http://hadoop.apache.org/hdfs/" />
+  <tab label="Wiki" href="http://wiki.apache.org/hadoop/hdfs" />
+  <tab label="HDFS 0.21 Documentation" dir="" />  
   
 </tabs>

Modified: hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/skinconf.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/skinconf.xml?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/skinconf.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/src/docs/src/documentation/skinconf.xml Mon Sep 21 23:07:44 2009
@@ -67,8 +67,8 @@
   <!-- project logo -->
   <project-name>Hadoop</project-name>
   <project-description>Scalable Computing Platform</project-description>
-  <project-url>http://hadoop.apache.org/core/</project-url>
-  <project-logo>images/core-logo.gif</project-logo>
+  <project-url>http://hadoop.apache.org/hdfs/</project-url>
+  <project-logo>images/hdfs-logo.jpg</project-logo>
 
   <!-- group logo -->
   <group-name>Hadoop</group-name>
@@ -146,13 +146,13 @@
     <!--Headers -->
 	#content h1 {
 	  margin-bottom: .5em;
-	  font-size: 200%; color: black;
+	  font-size: 185%; color: black;
 	  font-family: arial;
 	}  
-    h2, .h3 { font-size: 195%; color: black; font-family: arial; }
-	h3, .h4 { font-size: 140%; color: black; font-family: arial; margin-bottom: 0.5em; }
+    h2, .h3 { font-size: 175%; color: black; font-family: arial; }
+	h3, .h4 { font-size: 135%; color: black; font-family: arial; margin-bottom: 0.5em; }
 	h4, .h5 { font-size: 125%; color: black;  font-style: italic; font-weight: bold; font-family: arial; }
-	h5, h6 { font-size: 110%; color: #363636; font-weight: bold; } 
+	h5, h6 { font-size: 110%; color: #363636; font-weight: bold; }    
    
    <!--Code Background -->
     pre.code {

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java Mon Sep 21 23:07:44 2009
@@ -575,6 +575,35 @@
     leasechecker.put(src, result);
     return result;
   }
+  
+  /**
+   * Same as {@link #create(String, FsPermission, EnumSet, short, long,
+   * Progressable, int)} except that the permission
+   * is absolute (i.e. it has already been masked with umask).
+   * 
+   */
+  public OutputStream primitiveCreate(String src, 
+                             FsPermission absPermission,
+                             EnumSet<CreateFlag> flag,
+                             boolean createParent,
+                             short replication,
+                             long blockSize,
+                             Progressable progress,
+                             int buffersize,
+                             int bytesPerChecksum)
+    throws IOException {
+    checkOpen();
+    if (absPermission == null) {
+      absPermission = 
+        FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
+    } 
+    LOG.debug(src + ": masked=" + absPermission);
+    OutputStream result = new DFSOutputStream(src, absPermission,
+        flag, createParent, replication, blockSize, progress, buffersize,
+        bytesPerChecksum);
+    leasechecker.put(src, result);
+    return result;
+  } 
 
   /**
    * Append to an existing HDFS file.  
@@ -1003,6 +1032,28 @@
                                      FileAlreadyExistsException.class);
     }
   }
+  
+  /**
+   * Same as {@link #mkdirs(String, FsPermission, boolean)} except
+   * that the permission has already been masked against umask.
+   */
+  public boolean primitiveMkdir(String src, FsPermission absPermission)
+    throws IOException{
+    checkOpen();
+    if (absPermission == null) {
+      absPermission = 
+        FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
+    } 
+
+    LOG.debug(src + ": masked=" + absPermission);
+    try {
+      return namenode.mkdirs(src, absPermission, true);
+    } catch(RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+                                     NSQuotaExceededException.class,
+                                     DSQuotaExceededException.class);
+    }
+  }
 
   ContentSummary getContentSummary(String src) throws IOException {
     try {
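
A note on the primitiveCreate/primitiveMkdir additions above: "absolute" means the permission has already been masked with the configured umask. A short sketch of how such a permission can be derived, mirroring the null-permission fallback in the patch; the class name is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Mask the default permission with the umask from the configuration,
    // exactly as the patch does when it is handed a null permission.
    FsPermission absolute =
        FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
    System.out.println("absolute permission: " + absolute);
  }
}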

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Sep 21 23:07:44 2009
@@ -177,6 +177,13 @@
     }
     return dfs.getBlockLocations(getPathName(file.getPath()), start, len);
   }
+  
+  @Override
+  public BlockLocation[] getFileBlockLocations(Path p, 
+      long start, long len) throws IOException {
+    return dfs.getBlockLocations(getPathName(p), start, len);
+  }
 
   @Override
   public void setVerifyChecksum(boolean verifyChecksum) {
@@ -203,11 +210,21 @@
     EnumSet<CreateFlag> flag, int bufferSize, short replication, long blockSize,
     Progressable progress) throws IOException {
 
-    return new FSDataOutputStream
-       (dfs.create(getPathName(f), permission,
+    return new FSDataOutputStream(dfs.create(getPathName(f), permission,
                    flag, replication, blockSize, progress, bufferSize),
         statistics);
   }
+  
+  @SuppressWarnings("deprecation")
+  @Override
+  protected FSDataOutputStream primitiveCreate(Path f,
+    FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
+    short replication, long blockSize, Progressable progress,
+    int bytesPerChecksum) throws IOException {
+    return new FSDataOutputStream(dfs.primitiveCreate(getPathName(f),
+        absolutePermission, flag, true, replication, blockSize,
+        progress, bufferSize, bytesPerChecksum),statistics);
+   } 
 
   /**
    * Same as create(), except fails if parent directory doesn't already exist.
@@ -293,6 +310,13 @@
     return dfs.mkdirs(getPathName(f), permission, true);
   }
 
+  @SuppressWarnings("deprecation")
+  @Override
+  protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
+    throws IOException {
+    return dfs.primitiveMkdir(getPathName(f), absolutePermission);
+  }
+
   /** {@inheritDoc} */
   @Override
   public void close() throws IOException {
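
To illustrate the Path-based getFileBlockLocations override added above, a small sketch that prints where each block of a file lives; the path and class name are hypothetical, and a running cluster is assumed.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.txt");   // hypothetical path
    long len = fs.getFileStatus(file).getLen();
    // The new overload takes the Path directly instead of a FileStatus.
    BlockLocation[] blocks = fs.getFileBlockLocations(file, 0, len);
    for (BlockLocation b : blocks) {
      System.out.println("offset " + b.getOffset() + " length " + b.getLength()
          + " hosts " + Arrays.toString(b.getHosts()));
    }
    fs.close();
  }
}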

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java Mon Sep 21 23:07:44 2009
@@ -39,14 +39,12 @@
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -59,7 +57,6 @@
 import org.xml.sax.XMLReader;
 import org.xml.sax.helpers.DefaultHandler;
 import org.xml.sax.helpers.XMLReaderFactory;
-import org.apache.hadoop.hdfs.ByteRangeInputStream;
 
 
 
@@ -298,7 +295,7 @@
 
   @Override
   public Path getWorkingDirectory() {
-    return new Path("/").makeQualified(this);
+    return new Path("/").makeQualified(getUri(), null);
   }
 
   @Override
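
The getWorkingDirectory() change above replaces the deprecated Path.makeQualified(FileSystem) with the (URI, Path) overload. A tiny sketch of the overload's behavior; the authority and class name below are made up.

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class MakeQualifiedSketch {
  public static void main(String[] args) {
    URI uri = URI.create("hftp://namenode.example.com:50070");   // hypothetical authority
    // An absolute path needs no working directory, so null is acceptable here,
    // just as in the patched getWorkingDirectory().
    Path qualified = new Path("/").makeQualified(uri, null);
    System.out.println(qualified);   // prints hftp://namenode.example.com:50070/
  }
}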

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java Mon Sep 21 23:07:44 2009
@@ -19,10 +19,8 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.Node; 
 import org.apache.hadoop.util.ReflectionUtils;
 import java.util.*;
 

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java Mon Sep 21 23:07:44 2009
@@ -23,7 +23,6 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=817453&r1=817452&r2=817453&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Sep 21 23:07:44 2009
@@ -2443,7 +2443,6 @@
     boolean firstOne = true;
     while (nonExcess.size() - replication > 0) {
       DatanodeInfo cur = null;
-      long minSpace = Long.MAX_VALUE;
 
       // check if we can del delNodeHint
      if (firstOne && delNodeHint !=null && nonExcess.contains(delNodeHint) &&

Added: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=817453&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (added)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Mon Sep 21 23:07:44 2009
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestHDFSFileContextMainOperations extends
+                                  FileContextMainOperationsBaseTest {
+  
+  private static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  
+  @BeforeClass
+  public static void clusterSetupAtBeginning()
+                                    throws IOException, LoginException  {
+    cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
+    fc = FileContext.getFileContext(cluster.getFileSystem());
+    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
+        UnixUserGroupInformation.login().getUserName()));
+    fc.mkdirs(defaultWorkingDirectory, FileContext.DEFAULT_PERM);
+  }
+
+      
+  @AfterClass
+  public static void clusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();   
+  }
+  
+  @Before
+  public void setUp() throws Exception {
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+
+  @Override
+  protected Path getDefaultWorkingDirectory() {
+    return defaultWorkingDirectory;
+  } 
+  
+  @Override
+  @Test
+  public void testRenameFileAsExistingFile() throws Exception {
+    // ignore base class test till hadoop-6240 is fixed
+  }
+}

Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain


