hadoop-common-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1463203 [4/7] - in /hadoop/common/branches/HDFS-347/hadoop-common-project: hadoop-annotations/ hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ hadoop-common/ hadoop-common/dev-support/ hadoop-common/src/ hadoop-co...
Date: Mon, 01 Apr 2013 16:47:27 GMT
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java Mon Apr  1 16:47:16 2013
@@ -76,20 +76,24 @@ public class TableMapping extends Cached
     getRawMapping().setConf(conf);
   }
   
+  @Override
+  public void reloadCachedMappings() {
+    super.reloadCachedMappings();
+    getRawMapping().reloadCachedMappings();
+  }
+  
   private static final class RawTableMapping extends Configured
       implements DNSToSwitchMapping {
     
-    private final Map<String, String> map = new HashMap<String, String>();
-    private boolean initialized = false;
+    private Map<String, String> map;
   
-    private synchronized void load() {
-      map.clear();
+    private Map<String, String> load() {
+      Map<String, String> loadMap = new HashMap<String, String>();
   
       String filename = getConf().get(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, null);
       if (StringUtils.isBlank(filename)) {
-        LOG.warn(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY + " not configured. "
-            + NetworkTopology.DEFAULT_RACK + " will be returned.");
-        return;
+        LOG.warn(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY + " not configured. ");
+        return null;
       }
   
       BufferedReader reader = null;
@@ -101,7 +105,7 @@ public class TableMapping extends Cached
           if (line.length() != 0 && line.charAt(0) != '#') {
             String[] columns = line.split("\\s+");
             if (columns.length == 2) {
-              map.put(columns[0], columns[1]);
+              loadMap.put(columns[0], columns[1]);
             } else {
               LOG.warn("Line does not have two columns. Ignoring. " + line);
             }
@@ -109,29 +113,31 @@ public class TableMapping extends Cached
           line = reader.readLine();
         }
       } catch (Exception e) {
-        LOG.warn(filename + " cannot be read. " + NetworkTopology.DEFAULT_RACK
-            + " will be returned.", e);
-        map.clear();
+        LOG.warn(filename + " cannot be read.", e);
+        return null;
       } finally {
         if (reader != null) {
           try {
             reader.close();
           } catch (IOException e) {
-            LOG.warn(filename + " cannot be read. "
-                + NetworkTopology.DEFAULT_RACK + " will be returned.", e);
-            map.clear();
+            LOG.warn(filename + " cannot be read.", e);
+            return null;
           }
         }
       }
+      return loadMap;
     }
   
     @Override
     public synchronized List<String> resolve(List<String> names) {
-      if (!initialized) {
-        initialized = true;
-        load();
+      if (map == null) {
+        map = load();
+        if (map == null) {
+          LOG.warn("Failed to read topology table. " +
+            NetworkTopology.DEFAULT_RACK + " will be used for all nodes.");
+          map = new HashMap<String, String>();
+        }
       }
-  
       List<String> results = new ArrayList<String>(names.size());
       for (String name : names) {
         String result = map.get(name);
@@ -143,6 +149,18 @@ public class TableMapping extends Cached
       }
       return results;
     }
-    
+
+    @Override
+    public void reloadCachedMappings() {
+      Map<String, String> newMap = load();
+      if (newMap == null) {
+        LOG.error("Failed to reload the topology table.  The cached " +
+            "mappings will not be cleared.");
+      } else {
+        synchronized(this) {
+          map = newMap;
+        }
+      }
+    }
   }
 }
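
A note on the TableMapping change above: load() now builds a fresh map and returns null on failure, so resolve() can fall back to NetworkTopology.DEFAULT_RACK and reloadCachedMappings() can swap in a replacement table without clearing the old one on a failed reload. The parser implies a simple format for the file named by NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY: blank lines and lines starting with '#' are skipped, and every other line must hold exactly two whitespace-separated columns. A hypothetical mapping file:

    # host-or-ip          rack
    192.168.1.10          /rack1
    host-a.example.com    /rack2

Hosts missing from the table resolve to the default rack.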

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java Mon Apr  1 16:47:16 2013
@@ -192,7 +192,7 @@ public class Buffer implements Comparabl
     int hash = 1;
     byte[] b = this.get();
     for (int i = 0; i < count; i++)
-      hash = (31 * hash) + (int)b[i];
+      hash = (31 * hash) + b[i];
     return hash;
   }
   

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java Mon Apr  1 16:47:16 2013
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * Interface that alll the serializers have to implement.
+ * Interface that all the serializers have to implement.
  * 
  * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
  */

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java Mon Apr  1 16:47:16 2013
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.security;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
+
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -25,7 +27,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 /**
  * This class implements parsing and handling of Kerberos principal names. In 
  * particular, it splits them apart and translates them down into local
@@ -36,15 +37,6 @@ import org.apache.hadoop.fs.CommonConfig
 @InterfaceStability.Evolving
 public class HadoopKerberosName extends KerberosName {
 
-  static {
-    try {
-      KerberosUtil.getDefaultRealm();
-    } catch (Exception ke) {
-      if(UserGroupInformation.isSecurityEnabled())
-        throw new IllegalArgumentException("Can't get Kerberos configuration",ke);
-    }
-  }
-
   /**
    * Create a name from the full Kerberos principal name.
    * @param name
@@ -63,7 +55,23 @@ public class HadoopKerberosName extends 
    * @throws IOException
    */
   public static void setConfiguration(Configuration conf) throws IOException {
-    String ruleString = conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "DEFAULT");
+    final String defaultRule;
+    switch (SecurityUtil.getAuthenticationMethod(conf)) {
+      case KERBEROS:
+      case KERBEROS_SSL:
+        try {
+          KerberosUtil.getDefaultRealm();
+        } catch (Exception ke) {
+          throw new IllegalArgumentException("Can't get Kerberos realm", ke);
+        }
+        defaultRule = "DEFAULT";
+        break;
+      default:
+        // just extract the simple user name
+        defaultRule = "RULE:[1:$1] RULE:[2:$1]";
+        break; 
+    }
+    String ruleString = conf.get(HADOOP_SECURITY_AUTH_TO_LOCAL, defaultRule);
     setRules(ruleString);
   }
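
A note on the HadoopKerberosName change above: the default realm is now only probed when the authentication method is actually Kerberos, and non-Kerberos deployments get a default auth_to_local rule that simply extracts the short user name. Roughly, assuming standard Kerberos name-rule semantics, the two rules behave like this:

    user@EXAMPLE.COM                   ->  user   (matched by RULE:[1:$1])
    user/host.example.com@EXAMPLE.COM  ->  user   (matched by RULE:[2:$1])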
 

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java Mon Apr  1 16:47:16 2013
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Hashtable;
 import java.util.List;
 
+import javax.naming.CommunicationException;
 import javax.naming.Context;
 import javax.naming.NamingEnumeration;
 import javax.naming.NamingException;
@@ -166,6 +167,8 @@ public class LdapGroupsMapping
   private String groupMemberAttr;
   private String groupNameAttr;
 
+  public static int RECONNECT_RETRY_COUNT = 3;
+  
   /**
    * Returns list of groups for a user.
    * 
@@ -178,34 +181,63 @@ public class LdapGroupsMapping
    */
   @Override
   public synchronized List<String> getGroups(String user) throws IOException {
+    List<String> emptyResults = new ArrayList<String>();
+    /*
+     * Normal garbage collection takes care of removing Context instances
+     * when they are no longer in use, and the connections they hold are
+     * closed automatically. So if the connection has been closed and we get
+     * a CommunicationException, retry a few times with a new
+     * DirContext/connection.
+     */
+    try {
+      return doGetGroups(user);
+    } catch (CommunicationException e) {
+      LOG.warn("Connection is closed, will try to reconnect");
+    } catch (NamingException e) {
+      LOG.warn("Exception trying to get groups for user " + user, e);
+      return emptyResults;
+    }
+
+    int retryCount = 0;
+    while (retryCount++ < RECONNECT_RETRY_COUNT) {
+      //reset ctx so that new DirContext can be created with new connection
+      this.ctx = null;
+      
+      try {
+        return doGetGroups(user);
+      } catch (CommunicationException e) {
+        LOG.warn("Connection being closed, reconnecting failed, retryCount = " + retryCount);
+      } catch (NamingException e) {
+        LOG.warn("Exception trying to get groups for user " + user, e);
+        return emptyResults;
+      }
+    }
+    
+    return emptyResults;
+  }
+  
+  List<String> doGetGroups(String user) throws NamingException {
     List<String> groups = new ArrayList<String>();
 
-    try {
-      DirContext ctx = getDirContext();
+    DirContext ctx = getDirContext();
 
-      // Search for the user. We'll only ever need to look at the first result
-      NamingEnumeration<SearchResult> results = ctx.search(baseDN,
-                                                           userSearchFilter,
-                                                           new Object[]{user},
-                                                           SEARCH_CONTROLS);
-      if (results.hasMoreElements()) {
-        SearchResult result = results.nextElement();
-        String userDn = result.getNameInNamespace();
+    // Search for the user. We'll only ever need to look at the first result
+    NamingEnumeration<SearchResult> results = ctx.search(baseDN,
+        userSearchFilter,
+        new Object[]{user},
+        SEARCH_CONTROLS);
+    if (results.hasMoreElements()) {
+      SearchResult result = results.nextElement();
+      String userDn = result.getNameInNamespace();
 
-        NamingEnumeration<SearchResult> groupResults =
+      NamingEnumeration<SearchResult> groupResults =
           ctx.search(baseDN,
-                     "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
-                     new Object[]{userDn},
-                     SEARCH_CONTROLS);
-        while (groupResults.hasMoreElements()) {
-          SearchResult groupResult = groupResults.nextElement();
-          Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
-          groups.add(groupName.get().toString());
-        }
+              "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
+              new Object[]{userDn},
+              SEARCH_CONTROLS);
+      while (groupResults.hasMoreElements()) {
+        SearchResult groupResult = groupResults.nextElement();
+        Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
+        groups.add(groupName.get().toString());
       }
-    } catch (NamingException e) {
-      LOG.warn("Exception trying to get groups for user " + user, e);
-      return new ArrayList<String>();
     }
 
     return groups;
@@ -236,7 +268,7 @@ public class LdapGroupsMapping
 
     return ctx;
   }
-
+  
   /**
    * Caches groups, no need to do that for this provider
    */
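
A note on the LdapGroupsMapping change above: getGroups() now retries on CommunicationException by nulling out ctx so that getDirContext() builds a fresh connection, and it degrades to an empty group list instead of throwing. Since RECONNECT_RETRY_COUNT is a public, non-final field, a test could plausibly tune it; a hedged sketch (the mapping instance, its setConf() configuration, and the user name are hypothetical):

    LdapGroupsMapping mapping = new LdapGroupsMapping();
    // ... mapping.setConf(conf) with the usual LDAP settings ...
    LdapGroupsMapping.RECONNECT_RETRY_COUNT = 1;   // fail fast in tests
    List<String> groups = mapping.getGroups("jdoe");
    // on persistent connection failure this is an empty list, not an exception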

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java Mon Apr  1 16:47:16 2013
@@ -30,6 +30,7 @@ import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Locale;
 import java.util.ServiceLoader;
 import java.util.Set;
 
@@ -219,7 +220,7 @@ public class SecurityUtil {
     if (fqdn == null || fqdn.isEmpty() || fqdn.equals("0.0.0.0")) {
       fqdn = getLocalHostName();
     }
-    return components[0] + "/" + fqdn.toLowerCase() + "@" + components[2];
+    return components[0] + "/" + fqdn.toLowerCase(Locale.US) + "@" + components[2];
   }
   
   static String getLocalHostName() throws UnknownHostException {
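
A note on the SecurityUtil change above: String.toLowerCase() with no argument uses the JVM's default locale, and in the Turkish locale an ASCII 'I' lower-cases to the dotless 'ı' (U+0131), which would corrupt the host component of a Kerberos principal. Passing Locale.US pins the ASCII behavior. A minimal illustration:

    Locale.setDefault(new Locale("tr", "TR"));
    "NAMENODE-I".toLowerCase();          // "namenode-ı" -- dotless i
    "NAMENODE-I".toLowerCase(Locale.US); // "namenode-i"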

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java Mon Apr  1 16:47:16 2013
@@ -86,7 +86,8 @@ public class ShellBasedUnixGroupsMapping
       LOG.warn("got exception trying to get groups for user " + user, e);
     }
     
-    StringTokenizer tokenizer = new StringTokenizer(result);
+    StringTokenizer tokenizer =
+        new StringTokenizer(result, Shell.TOKEN_SEPARATOR_REGEX);
     List<String> groups = new LinkedList<String>();
     while (tokenizer.hasMoreTokens()) {
       groups.add(tokenizer.nextToken());
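
A note on the ShellBasedUnixGroupsMapping change above: Shell.TOKEN_SEPARATOR_REGEX (defined in the Shell.java change below) is "[|\n\r]" on Windows and "[ \t\n\r\f]" elsewhere. One subtlety worth flagging: StringTokenizer treats its second argument as a set of delimiter characters rather than a regular expression, so the brackets are themselves (harmless) delimiters here, while the same constant still works as a character-class regex if handed to String.split(). For typical group names the two interpretations tokenize identically:

    // both yield ["Users", "Administrators"] for winutils-style output
    new StringTokenizer("Users|Administrators", Shell.TOKEN_SEPARATOR_REGEX);
    "Users|Administrators".split(Shell.TOKEN_SEPARATOR_REGEX);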

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java Mon Apr  1 16:47:16 2013
@@ -53,20 +53,20 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * User and group information for Hadoop.
  * This class wraps around a JAAS Subject and provides methods to determine the
@@ -190,8 +190,6 @@ public class UserGroupInformation {
 
   /** Metrics to track UGI activity */
   static UgiMetrics metrics = UgiMetrics.create();
-  /** Are the static variables that depend on configuration initialized? */
-  private static boolean isInitialized = false;
   /** The auth method to use */
   private static AuthenticationMethod authenticationMethod;
   /** Server-side groups fetching service */
@@ -211,8 +209,8 @@ public class UserGroupInformation {
    * Must be called before useKerberos or groups is used.
    */
   private static synchronized void ensureInitialized() {
-    if (!isInitialized) {
-        initialize(new Configuration(), KerberosName.hasRulesBeenSet());
+    if (conf == null) {
+      initialize(new Configuration(), false);
     }
   }
 
@@ -220,25 +218,17 @@ public class UserGroupInformation {
    * Initialize UGI and related classes.
    * @param conf the configuration to use
    */
-  private static synchronized void initialize(Configuration conf, boolean skipRulesSetting) {
-    initUGI(conf);
-    // give the configuration on how to translate Kerberos names
-    try {
-      if (!skipRulesSetting) {
+  private static synchronized void initialize(Configuration conf,
+                                              boolean overrideNameRules) {
+    authenticationMethod = SecurityUtil.getAuthenticationMethod(conf);
+    if (overrideNameRules || !HadoopKerberosName.hasRulesBeenSet()) {
+      try {
         HadoopKerberosName.setConfiguration(conf);
+      } catch (IOException ioe) {
+        throw new RuntimeException(
+            "Problem with Kerberos auth_to_local name configuration", ioe);
       }
-    } catch (IOException ioe) {
-      throw new RuntimeException("Problem with Kerberos auth_to_local name " +
-          "configuration", ioe);
     }
-  }
-  
-  /**
-   * Set the configuration values for UGI.
-   * @param conf the configuration to use
-   */
-  private static synchronized void initUGI(Configuration conf) {
-    authenticationMethod = SecurityUtil.getAuthenticationMethod(conf);
     try {
         kerberosMinSecondsBeforeRelogin = 1000L * conf.getLong(
                 HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN,
@@ -253,7 +243,6 @@ public class UserGroupInformation {
     if (!(groups instanceof TestingGroups)) {
       groups = Groups.getUserToGroupsMappingService(conf);
     }
-    isInitialized = true;
     UserGroupInformation.conf = conf;
   }
 
@@ -266,7 +255,18 @@ public class UserGroupInformation {
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public static void setConfiguration(Configuration conf) {
-    initialize(conf, false);
+    initialize(conf, true);
+  }
+  
+  @InterfaceAudience.Private
+  @VisibleForTesting
+  static void reset() {
+    authenticationMethod = null;
+    conf = null;
+    groups = null;
+    kerberosMinSecondsBeforeRelogin = 0;
+    setLoginUser(null);
+    HadoopKerberosName.setRules(null);
   }
   
   /**
@@ -713,7 +713,8 @@ public class UserGroupInformation {
 
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
-  synchronized static void setLoginUser(UserGroupInformation ugi) {
+  @VisibleForTesting
+  public synchronized static void setLoginUser(UserGroupInformation ugi) {
     // if this is to become stable, should probably logout the currently
     // logged in ugi if it's different
     loginUser = ugi;
@@ -1498,7 +1499,7 @@ public class UserGroupInformation {
       } else if (cause instanceof InterruptedException) {
         throw (InterruptedException) cause;
       } else {
-        throw new UndeclaredThrowableException(pae,"Unknown exception in doAs");
+        throw new UndeclaredThrowableException(cause);
       }
     }
   }
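
A note on the UserGroupInformation changes above: setConfiguration() now always re-applies the auth_to_local rules, ensureInitialized() keys off conf itself instead of a separate flag, and a package-private reset() clears all static state for tests. A hedged test-side sketch (assumes the test lives in org.apache.hadoop.security so it can see the package-private reset()):

    UserGroupInformation.reset();                        // wipe static state
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);         // re-initialize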

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java Mon Apr  1 16:47:16 2013
@@ -164,7 +164,9 @@ public class FileBasedKeyStoresFactory i
     } else {
       keystore.load(null, null);
     }
-    KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509");
+    KeyManagerFactory keyMgrFactory = KeyManagerFactory
+        .getInstance(SSLFactory.SSLCERTIFICATE);
+      
     keyMgrFactory.init(keystore, (keystorePassword != null) ?
                                  keystorePassword.toCharArray() : null);
     keyManagers = keyMgrFactory.getKeyManagers();

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java Mon Apr  1 16:47:16 2013
@@ -169,8 +169,8 @@ public final class ReloadingX509TrustMan
       in.close();
     }
 
-    TrustManagerFactory trustManagerFactory =
-      TrustManagerFactory.getInstance("SunX509");
+    TrustManagerFactory trustManagerFactory = 
+      TrustManagerFactory.getInstance(SSLFactory.SSLCERTIFICATE);
     trustManagerFactory.init(ks);
     TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
     for (TrustManager trustManager1 : trustManagers) {

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java Mon Apr  1 16:47:16 2013
@@ -58,6 +58,9 @@ public class SSLFactory implements Conne
     "hadoop.ssl.client.conf";
   public static final String SSL_SERVER_CONF_KEY =
     "hadoop.ssl.server.conf";
+  private static final boolean IBMJAVA = 
+      System.getProperty("java.vendor").contains("IBM");
+  public static final String SSLCERTIFICATE = IBMJAVA ? "ibmX509" : "SunX509";
 
   public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false;
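
A note on the three SSL changes above: the hard-coded "SunX509" algorithm name fails on IBM JVMs, whose JSSE provider registers "ibmX509" instead, so both keystore factories now resolve the name through SSLFactory.SSLCERTIFICATE. The lookups it feeds are the standard JSSE ones:

    KeyManagerFactory kmf =
        KeyManagerFactory.getInstance(SSLFactory.SSLCERTIFICATE);
    TrustManagerFactory tmf =
        TrustManagerFactory.getInstance(SSLFactory.SSLCERTIFICATE);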
 

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java Mon Apr  1 16:47:16 2013
@@ -23,11 +23,10 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 
 /**
  * Class that provides utility functions for checking disk problem
@@ -36,10 +35,16 @@ import org.apache.hadoop.fs.permission.F
 @InterfaceStability.Unstable
 public class DiskChecker {
 
+  private static final long SHELL_TIMEOUT = 10 * 1000;
+
   public static class DiskErrorException extends IOException {
     public DiskErrorException(String msg) {
       super(msg);
     }
+
+    public DiskErrorException(String msg, Throwable cause) {
+      super(msg, cause);
+    }
   }
     
   public static class DiskOutOfSpaceException extends IOException {
@@ -85,25 +90,11 @@ public class DiskChecker {
    * @throws DiskErrorException
    */
   public static void checkDir(File dir) throws DiskErrorException {
-    if (!mkdirsWithExistsCheck(dir))
+    if (!mkdirsWithExistsCheck(dir)) {
       throw new DiskErrorException("Can not create directory: "
                                    + dir.toString());
-
-    if (!dir.isDirectory())
-      throw new DiskErrorException("Not a directory: "
-                                   + dir.toString());
-
-    if (!dir.canRead())
-      throw new DiskErrorException("Directory is not readable: "
-                                   + dir.toString());
-
-    if (!dir.canWrite())
-      throw new DiskErrorException("Directory is not writable: "
-                                   + dir.toString());
-
-    if (!dir.canExecute())
-      throw new DiskErrorException("Directory is not executable: "
-	  + dir.toString());
+    }
+    checkDirAccess(dir);
   }
 
   /**
@@ -152,24 +143,102 @@ public class DiskChecker {
                               FsPermission expected)
   throws DiskErrorException, IOException {
     mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
+    checkDirAccess(localFS.pathToFile(dir));
+  }
 
-    FileStatus stat = localFS.getFileStatus(dir);
-    FsPermission actual = stat.getPermission();
+  /**
+   * Checks that the given file is a directory and that the current running
+   * process can read, write, and execute it.
+   * 
+   * @param dir File to check
+   * @throws DiskErrorException if dir is not a directory, not readable, not
+   *   writable, or not executable
+   */
+  private static void checkDirAccess(File dir) throws DiskErrorException {
+    if (!dir.isDirectory()) {
+      throw new DiskErrorException("Not a directory: "
+                                   + dir.toString());
+    }
 
-    if (!stat.isDirectory())
-      throw new DiskErrorException("not a directory: "+ dir.toString());
+    if (Shell.WINDOWS) {
+      checkAccessByFileSystemInteraction(dir);
+    } else {
+      checkAccessByFileMethods(dir);
+    }
+  }
 
-    FsAction user = actual.getUserAction();
-    if (!user.implies(FsAction.READ))
-      throw new DiskErrorException("directory is not readable: "
+  /**
+   * Checks that the current running process can read, write, and execute the
+   * given directory by using methods of the File object.
+   * 
+   * @param dir File to check
+   * @throws DiskErrorException if dir is not readable, not writable, or not
+   *   executable
+   */
+  private static void checkAccessByFileMethods(File dir)
+      throws DiskErrorException {
+    if (!dir.canRead()) {
+      throw new DiskErrorException("Directory is not readable: "
+                                   + dir.toString());
+    }
+
+    if (!dir.canWrite()) {
+      throw new DiskErrorException("Directory is not writable: "
                                    + dir.toString());
+    }
 
-    if (!user.implies(FsAction.WRITE))
-      throw new DiskErrorException("directory is not writable: "
+    if (!dir.canExecute()) {
+      throw new DiskErrorException("Directory is not executable: "
                                    + dir.toString());
+    }
+  }
 
-    if (!user.implies(FsAction.EXECUTE))
-      throw new DiskErrorException("directory is not listable: "
+  /**
+   * Checks that the current running process can read, write, and execute the
+   * given directory by attempting each of those operations on the file system.
+   * This method contains several workarounds to known JVM bugs that cause
+   * File.canRead, File.canWrite, and File.canExecute to return incorrect results
+   * on Windows with NTFS ACLs.  See:
+   * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6203387
+   * These bugs are supposed to be fixed in JDK7.
+   * 
+   * @param dir File to check
+   * @throws DiskErrorException if dir is not readable, not writable, or not
+   *   executable
+   */
+  private static void checkAccessByFileSystemInteraction(File dir)
+      throws DiskErrorException {
+    // Make sure we can read the directory by listing it.
+    if (dir.list() == null) {
+      throw new DiskErrorException("Directory is not readable: "
                                    + dir.toString());
+    }
+
+    // Make sure we can write to the directory by creating a temp file in it.
+    try {
+      File tempFile = File.createTempFile("checkDirAccess", null, dir);
+      if (!tempFile.delete()) {
+        throw new DiskErrorException("Directory is not writable: "
+                                     + dir.toString());
+      }
+    } catch (IOException e) {
+      throw new DiskErrorException("Directory is not writable: "
+                                   + dir.toString(), e);
+    }
+
+    // Make sure the directory is executable by trying to cd into it.  This
+    // launches a separate process.  It does not change the working directory of
+    // the current process.
+    try {
+      String[] cdCmd = new String[] { "cmd", "/C", "cd",
+          dir.getAbsolutePath() };
+      Shell.execCommand(null, cdCmd, SHELL_TIMEOUT);
+    } catch (Shell.ExitCodeException e) {
+      throw new DiskErrorException("Directory is not executable: "
+                                   + dir.toString(), e);
+    } catch (IOException e) {
+      throw new DiskErrorException("Directory is not executable: "
+                                   + dir.toString(), e);
+    }
   }
 }
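
A note on the DiskChecker change above: on Windows the File.canRead/canWrite/canExecute checks are replaced by real file-system probes (a directory listing, a temp-file create/delete, and a "cmd /C cd" into the directory). A hedged usage sketch (the path is hypothetical):

    try {
      DiskChecker.checkDir(new File("/data/1/dfs"));
    } catch (DiskChecker.DiskErrorException e) {
      // the new (msg, cause) constructor preserves the underlying failure
      LOG.warn("Disk check failed for /data/1/dfs", e);
    }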

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java Mon Apr  1 16:47:16 2013
@@ -42,6 +42,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * <code>GenericOptionsParser</code> is a utility to parse command line
@@ -321,15 +323,17 @@ public class GenericOptionsParser {
       String fileName = line.getOptionValue("tokenCacheFile");
       // check if the local file exists
       FileSystem localFs = FileSystem.getLocal(conf);
-      Path p = new Path(fileName);
+      Path p = localFs.makeQualified(new Path(fileName));
       if (!localFs.exists(p)) {
           throw new FileNotFoundException("File "+fileName+" does not exist.");
       }
       if(LOG.isDebugEnabled()) {
         LOG.debug("setting conf tokensFile: " + fileName);
       }
-      conf.set("mapreduce.job.credentials.json", localFs.makeQualified(p)
-          .toString(), "from -tokenCacheFile command line option");
+      UserGroupInformation.getCurrentUser().addCredentials(
+          Credentials.readTokenStorageFile(p, conf));
+      conf.set("mapreduce.job.credentials.json", p.toString(),
+               "from -tokenCacheFile command line option");
 
     }
   }
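
A note on the GenericOptionsParser change above: -tokenCacheFile now qualifies the path against the local file system and eagerly loads the token storage file into the current user's credentials, rather than only recording the path in mapreduce.job.credentials.json. A hypothetical invocation that exercises it:

    hadoop jar app.jar MyJob -tokenCacheFile /tmp/tokens.bin in out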

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java Mon Apr  1 16:47:16 2013
@@ -60,7 +60,7 @@ class NativeCrc32 {
         fileName, basePos);
   }
   
-  private static native void nativeVerifyChunkedSums(
+    private static native void nativeVerifyChunkedSums(
       int bytesPerSum, int checksumType,
       ByteBuffer sums, int sumsOffset,
       ByteBuffer data, int dataOffset, int dataLength,

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java Mon Apr  1 16:47:16 2013
@@ -32,9 +32,10 @@ public class PlatformName {
    * The complete platform 'name' to identify the platform as 
    * per the java-vm.
    */
-  private static final String platformName = System.getProperty("os.name") + "-" + 
-    System.getProperty("os.arch") + "-" +
-    System.getProperty("sun.arch.data.model");
+  private static final String platformName =
+      (Shell.WINDOWS ? System.getenv("os") : System.getProperty("os.name"))
+      + "-" + System.getProperty("os.arch")
+      + "-" + System.getProperty("sun.arch.data.model");
   
   /**
    * Get the complete platform as per the java-vm.

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java Mon Apr  1 16:47:16 2013
@@ -21,6 +21,7 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.util.Arrays;
 import java.util.Map;
 import java.util.Timer;
 import java.util.TimerTask;
@@ -44,46 +45,208 @@ abstract public class Shell {
   
   public static final Log LOG = LogFactory.getLog(Shell.class);
   
+  private static boolean IS_JAVA7_OR_ABOVE =
+      System.getProperty("java.version").substring(0, 3).compareTo("1.7") >= 0;
+
+  public static boolean isJava7OrAbove() {
+    return IS_JAVA7_OR_ABOVE;
+  }
+
   /** a Unix command to get the current user's name */
   public final static String USER_NAME_COMMAND = "whoami";
+
+  /** Windows CreateProcess synchronization object */
+  public static final Object WindowsProcessLaunchLock = new Object();
+
   /** a Unix command to get the current user's groups list */
   public static String[] getGroupsCommand() {
-    return new String[]{"bash", "-c", "groups"};
+    return (WINDOWS)? new String[]{"cmd", "/c", "groups"}
+                    : new String[]{"bash", "-c", "groups"};
   }
+
   /** a Unix command to get a given user's groups list */
   public static String[] getGroupsForUserCommand(final String user) {
     //the 'groups username' output is inconsistent across different unixes
-    return new String [] {"bash", "-c", "id -Gn " + user};
+    return (WINDOWS) ? new String[] { WINUTILS, "groups", "-F", "\"" + user + "\""}
+                     : new String [] {"bash", "-c", "id -Gn " + user};
   }
+
   /** a Unix command to get a given netgroup's user list */
   public static String[] getUsersForNetgroupCommand(final String netgroup) {
     //'getent netgroup' returns a given netgroup's user list
-    return new String [] {"bash", "-c", "getent netgroup " + netgroup};
+    return (WINDOWS)? new String [] {"cmd", "/c", "getent netgroup " + netgroup}
+                    : new String [] {"bash", "-c", "getent netgroup " + netgroup};
+  }
+
+  /** Return a command to get permission information. */
+  public static String[] getGetPermissionCommand() {
+    return (WINDOWS) ? new String[] { WINUTILS, "ls", "-F" }
+                     : new String[] { "/bin/ls", "-ld" };
+  }
+
+  /** Return a command to set permission */
+  public static String[] getSetPermissionCommand(String perm, boolean recursive) {
+    if (recursive) {
+      return (WINDOWS) ? new String[] { WINUTILS, "chmod", "-R", perm }
+                         : new String[] { "chmod", "-R", perm };
+    } else {
+      return (WINDOWS) ? new String[] { WINUTILS, "chmod", perm }
+                       : new String[] { "chmod", perm };
+    }
+  }
+
+  /**
+   * Return a command to set permission for specific file.
+   * 
+   * @param perm String permission to set
+   * @param recursive boolean true to apply to all sub-directories recursively
+   * @param file String file to set
+   * @return String[] containing command and arguments
+   */
+  public static String[] getSetPermissionCommand(String perm, boolean recursive,
+                                                 String file) {
+    String[] baseCmd = getSetPermissionCommand(perm, recursive);
+    String[] cmdWithFile = Arrays.copyOf(baseCmd, baseCmd.length + 1);
+    cmdWithFile[cmdWithFile.length - 1] = file;
+    return cmdWithFile;
+  }
+
+  /** Return a command to set owner */
+  public static String[] getSetOwnerCommand(String owner) {
+    return (WINDOWS) ? new String[] { WINUTILS, "chown", "\"" + owner + "\"" }
+                     : new String[] { "chown", owner };
+  }
+  
+  /** Return a command to create symbolic links */
+  public static String[] getSymlinkCommand(String target, String link) {
+    return WINDOWS ? new String[] { WINUTILS, "symlink", link, target }
+                   : new String[] { "ln", "-s", target, link };
   }
+
   /** a Unix command to set permission */
   public static final String SET_PERMISSION_COMMAND = "chmod";
   /** a Unix command to set owner */
   public static final String SET_OWNER_COMMAND = "chown";
+
+  /** a Unix command to change the group of a file */
   public static final String SET_GROUP_COMMAND = "chgrp";
   /** a Unix command to create a link */
   public static final String LINK_COMMAND = "ln";
   /** a Unix command to get a link target */
   public static final String READ_LINK_COMMAND = "readlink";
-  /** Return a Unix command to get permission information. */
-  public static String[] getGET_PERMISSION_COMMAND() {
-    //force /bin/ls, except on windows.
-    return new String[] {(WINDOWS ? "ls" : "/bin/ls"), "-ld"};
-  }
 
   /**Time after which the executing script would be timedout*/
   protected long timeOutInterval = 0L;
   /** If or not script timed out*/
   private AtomicBoolean timedOut;
 
+
+  /** Centralized logic to discover and validate the sanity of the Hadoop 
+   *  home directory. Returns either NULL or a directory that exists and 
+   *  was specified via either -Dhadoop.home.dir or the HADOOP_HOME ENV 
+   *  variable.  This does a lot of work so it should only be called 
+   *  privately for initialization once per process.
+   **/
+  private static String checkHadoopHome() {
+
+    // first check the Dflag hadoop.home.dir with JVM scope
+    String home = System.getProperty("hadoop.home.dir");
+
+    // fall back to the system/user-global env variable
+    if (home == null) {
+      home = System.getenv("HADOOP_HOME");
+    }
+
+    try {
+       // couldn't find either setting for hadoop's home directory
+       if (home == null) {
+         throw new IOException("HADOOP_HOME or hadoop.home.dir are not set.");
+       }
+
+       if (home.startsWith("\"") && home.endsWith("\"")) {
+         home = home.substring(1, home.length()-1);
+       }
+
+       // check that the home setting is actually a directory that exists
+       File homedir = new File(home);
+       if (!homedir.isAbsolute() || !homedir.exists() || !homedir.isDirectory()) {
+         throw new IOException("Hadoop home directory " + homedir
+           + " does not exist, is not a directory, or is not an absolute path.");
+       }
+
+       home = homedir.getCanonicalPath();
+
+    } catch (IOException ioe) {
+       LOG.error("Failed to detect a valid hadoop home directory", ioe);
+       home = null;
+    }
+    
+    return home;
+  }
+  private static String HADOOP_HOME_DIR = checkHadoopHome();
+
+  // Public getter, throws an exception if HADOOP_HOME failed validation
+  // checks and is being referenced downstream.
+  public static final String getHadoopHome() throws IOException {
+    if (HADOOP_HOME_DIR == null) {
+      throw new IOException("Misconfigured HADOOP_HOME cannot be referenced.");
+    }
+
+    return HADOOP_HOME_DIR;
+  }
+
+  /** fully qualify the path to a binary that should be in a known hadoop 
+   *  bin location. This is primarily useful for disambiguating call-outs 
+   *  to executable sub-components of Hadoop to avoid clashes with other 
+   *  executables that may be in the path.  Caveat:  this call doesn't 
+   *  just format the path to the bin directory.  It also checks for file 
+   *  existence of the composed path. The output of this call should be 
+   *  cached by callers.
+   * */
+  public static final String getQualifiedBinPath(String executable) 
+  throws IOException {
+    // construct hadoop bin path to the specified executable
+    String fullExeName = HADOOP_HOME_DIR + File.separator + "bin" 
+      + File.separator + executable;
+
+    File exeFile = new File(fullExeName);
+    if (!exeFile.exists()) {
+      throw new IOException("Could not locate executable " + fullExeName
+        + " in the Hadoop binaries.");
+    }
+
+    return exeFile.getCanonicalPath();
+  }
+
   /** Set to true on Windows platforms */
   public static final boolean WINDOWS /* borrowed from Path.WINDOWS */
                 = System.getProperty("os.name").startsWith("Windows");
+
+  public static final boolean LINUX
+                = System.getProperty("os.name").startsWith("Linux");
   
+  /** a Windows utility to emulate Unix commands */
+  public static final String WINUTILS = getWinUtilsPath();
+
+  public static final String getWinUtilsPath() {
+    String winUtilsPath = null;
+
+    try {
+      if (WINDOWS) {
+        winUtilsPath = getQualifiedBinPath("winutils.exe");
+      }
+    } catch (IOException ioe) {
+       LOG.error("Failed to locate the winutils binary in the hadoop binary path",
+         ioe);
+    }
+
+    return winUtilsPath;
+  }
+
+  /** Token separator regex used to parse Shell tool outputs */
+  public static final String TOKEN_SEPARATOR_REGEX
+                = WINDOWS ? "[|\n\r]" : "[ \t\n\r\f]";
+
   private long    interval;   // refresh interval in msec
   private long    lastTime;   // last time the command was performed
   private Map<String, String> environment; // env for the command execution
@@ -144,7 +307,19 @@ abstract public class Shell {
       builder.directory(this.dir);
     }
     
-    process = builder.start();
+    if (Shell.WINDOWS) {
+      synchronized (WindowsProcessLaunchLock) {
+        // To workaround the race condition issue with child processes
+        // inheriting unintended handles during process launch that can
+        // lead to hangs on reading output and error streams, we
+        // serialize process creation. More info available at:
+        // http://support.microsoft.com/kb/315939
+        process = builder.start();
+      }
+    } else {
+      process = builder.start();
+    }
+
     if (timeOutInterval > 0) {
       timeOutTimer = new Timer("Shell command timeout");
       timeoutTimerTask = new ShellTimeoutTimerTask(
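
A note on the Shell changes above: command construction is now centralized in platform-aware helpers that dispatch to winutils.exe on Windows, HADOOP_HOME discovery and validation happens once in checkHadoopHome(), and Windows process creation is serialized under WindowsProcessLaunchLock to dodge the handle-inheritance race described at the Microsoft KB link in the code. A hedged sketch of the new helpers (path and mode are hypothetical):

    String[] chmod = Shell.getSetPermissionCommand("755", false, "/tmp/dir");
    String output = Shell.execCommand(chmod);  // throws IOException on failure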

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java Mon Apr  1 16:47:16 2013
@@ -30,12 +30,17 @@ import java.util.Date;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.StringTokenizer;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
+import org.apache.commons.lang.SystemUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.Shell;
 
 import com.google.common.net.InetAddresses;
 
@@ -52,6 +57,27 @@ public class StringUtils {
   public static final int SHUTDOWN_HOOK_PRIORITY = 0;
 
   /**
+   * Shell environment variables: $ followed by one letter or underscore, then
+   * any number of letters, digits, or underscores.  The group captures the
+   * environment variable name without the leading $.
+   */
+  public static final Pattern SHELL_ENV_VAR_PATTERN =
+    Pattern.compile("\\$([A-Za-z_]{1}[A-Za-z0-9_]*)");
+
+  /**
+   * Windows environment variables: surrounded by %.  The group captures the
+   * environment variable name without the leading and trailing %.
+   */
+  public static final Pattern WIN_ENV_VAR_PATTERN = Pattern.compile("%(.*?)%");
+
+  /**
+   * Regular expression that matches and captures environment variable names
+   * according to platform-specific rules.
+   */
+  public static final Pattern ENV_VAR_PATTERN = Shell.WINDOWS ?
+    WIN_ENV_VAR_PATTERN : SHELL_ENV_VAR_PATTERN;
+
+  /**
    * Make a string representation of the exception.
    * @param e The exception to stringify
    * @return A string with exception name and call stack.
@@ -588,6 +614,13 @@ public class StringUtils {
         )
       );
 
+    if (SystemUtils.IS_OS_UNIX) {
+      try {
+        SignalLogger.INSTANCE.register(LOG);
+      } catch (Throwable t) {
+        LOG.warn("failed to register any UNIX signal loggers: ", t);
+      }
+    }
     ShutdownHookManager.get().addShutdownHook(
       new Runnable() {
         @Override
@@ -792,6 +825,28 @@ public class StringUtils {
   }
 
   /**
+   * Concatenates strings, using a separator.
+   *
+   * @param separator to join with
+   * @param strings to join
+   * @return  the joined string
+   */
+  public static String join(CharSequence separator, String[] strings) {
+    // Ideally we wouldn't need to duplicate this code if arrays were Iterable.
+    StringBuilder sb = new StringBuilder();
+    boolean first = true;
+    for (String s : strings) {
+      if (first) {
+        first = false;
+      } else {
+        sb.append(separator);
+      }
+      sb.append(s);
+    }
+    return sb.toString();
+  }
+
+  /**
    * Convert SOME_STUFF to SomeStuff
    *
    * @param s input string
@@ -806,4 +861,37 @@ public class StringUtils {
 
     return sb.toString();
   }
+
+  /**
+   * Matches a template string against a pattern, replaces matched tokens with
+   * the supplied replacements, and returns the result.  The regular expression
+   * must use a capturing group.  The value of the first capturing group is used
+   * to look up the replacement.  If no replacement is found for the token, then
+   * it is replaced with the empty string.
+   * 
+   * For example, assume template is "%foo%_%bar%_%baz%", pattern is "%(.*?)%",
+   * and replacements contains 2 entries, mapping "foo" to "zoo" and "baz" to
+   * "zaz".  The result returned would be "zoo__zaz".
+   * 
+   * @param template String template to receive replacements
+   * @param pattern Pattern to match for identifying tokens, must use a capturing
+   *   group
+   * @param replacements Map<String, String> mapping tokens identified by the
+   *   capturing group to their replacement values
+   * @return String template with replacements
+   */
+  public static String replaceTokens(String template, Pattern pattern,
+      Map<String, String> replacements) {
+    StringBuffer sb = new StringBuffer();
+    Matcher matcher = pattern.matcher(template);
+    while (matcher.find()) {
+      String replacement = replacements.get(matcher.group(1));
+      if (replacement == null) {
+        replacement = "";
+      }
+      matcher.appendReplacement(sb, Matcher.quoteReplacement(replacement));
+    }
+    matcher.appendTail(sb);
+    return sb.toString();
+  }
 }
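
A note on the StringUtils changes above: ENV_VAR_PATTERN picks the platform's variable syntax ($VAR vs %VAR%), and replaceTokens() substitutes whatever the pattern's first capturing group matches, dropping unknown tokens. A sketch mirroring the javadoc example:

    Map<String, String> repl = new HashMap<String, String>();
    repl.put("foo", "zoo");
    repl.put("baz", "zaz");
    StringUtils.replaceTokens("%foo%_%bar%_%baz%",
        StringUtils.WIN_ENV_VAR_PATTERN, repl);   // -> "zoo__zaz"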

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java Mon Apr  1 16:47:16 2013
@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.util;
 
+import java.io.IOException;
+import java.net.URL;
+import java.net.URLDecoder;
+import java.util.Enumeration;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -155,5 +160,7 @@ public class VersionInfo {
     System.out.println("Subversion " + getUrl() + " -r " + getRevision());
     System.out.println("Compiled by " + getUser() + " on " + getDate());
     System.out.println("From source with checksum " + getSrcChecksum());
+    System.out.println("This command was run using " + 
+        ClassUtil.findContainingJar(VersionInfo.class));
   }
 }

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/overview.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/overview.html?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/overview.html (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/java/overview.html Mon Apr  1 16:47:16 2013
@@ -60,9 +60,7 @@ that process vast amounts of data. Here'
    Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
   </li>
   <li>
-    Win32 is supported as a <i>development</i> platform. Distributed operation 
-    has not been well tested on Win32, so this is not a <i>production</i> 
-    platform.
+    Windows is also a supported platform.
   </li>  
 </ul>
   
@@ -84,15 +82,6 @@ that process vast amounts of data. Here'
   </li>
 </ol>
 
-<h4>Additional requirements for Windows</h4>
-
-<ol>
-  <li>
-    <a href="http://www.cygwin.com/">Cygwin</a> - Required for shell support in 
-    addition to the required software above.
-  </li>
-</ol>
-  
 <h3>Installing Required Software</h3>
 
 <p>If your platform does not have the required software listed above, you
@@ -104,13 +93,6 @@ $ sudo apt-get install ssh<br>
 $ sudo apt-get install rsync<br>
 </pre></blockquote></p>
 
-<p>On Windows, if you did not install the required software when you
-installed cygwin, start the cygwin installer and select the packages:</p>
-<ul>
-  <li>openssh - the "Net" category</li>
-  <li>rsync - the "Net" category</li>
-</ul>
-
 <h2>Getting Started</h2>
 
 <p>First, you need to get a copy of the Hadoop code.</p>

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c Mon Apr  1 16:47:16 2013
@@ -16,10 +16,14 @@
  * limitations under the License.
  */
 
-#include "config.h"
+
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Compressor.h"
 
+#ifdef UNIX
+#include "config.h"
+#endif // UNIX
+
 //****************************
 // Simple Functions
 //****************************
@@ -61,6 +65,9 @@ JNIEXPORT void JNICALL Java_org_apache_h
 
 JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_compressBytesDirect
 (JNIEnv *env, jobject thisj){
+  const char* uncompressed_bytes;
+  char *compressed_bytes;
+
   // Get members of Lz4Compressor
   jobject clazz = (*env)->GetStaticObjectField(env, thisj, Lz4Compressor_clazz);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf);
@@ -70,7 +77,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "Lz4Compressor");
-  const char* uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "Lz4Compressor");
 
   if (uncompressed_bytes == 0) {
@@ -79,7 +86,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the output direct buffer
   LOCK_CLASS(env, clazz, "Lz4Compressor");
-  char* compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "Lz4Compressor");
 
   if (compressed_bytes == 0) {
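
The two hunks above (and the matching ones in the other native files below) hoist the buffer-pointer declarations to the top of the function and turn the original declaration-with-initializer lines into plain assignments. Mixing declarations into the statement flow is a C99 feature; the Visual Studio C compiler accepts only the C89 form, in which every declaration in a block precedes the first statement. A minimal standalone sketch of the same transformation, with illustrative names that are not from the patch:

    #include <stdlib.h>
    #include <string.h>

    void copy_example(void)
    {
        /* C89: all declarations first ... */
        const char *src;
        char *dst;

        /* ... then statements, with the former initializers
           rewritten as assignments. */
        src = "hello";
        dst = malloc(6);
        if (dst != NULL) {
            memcpy(dst, src, 6);
            free(dst);
        }
    }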

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c Mon Apr  1 16:47:16 2013
@@ -16,10 +16,13 @@
  * limitations under the License.
  */
 
-#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
 
+#ifdef UNIX
+#include "config.h"
+#endif // UNIX
+
 int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, int maxOutputSize);
 
 /*
@@ -58,6 +61,9 @@ JNIEXPORT void JNICALL Java_org_apache_h
 
 JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_decompressBytesDirect
 (JNIEnv *env, jobject thisj){
+  const char *compressed_bytes;
+  char *uncompressed_bytes;
+
   // Get members of Lz4Decompressor
   jobject clazz = (*env)->GetStaticObjectField(env,thisj, Lz4Decompressor_clazz);
   jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_compressedDirectBuf);
@@ -67,7 +73,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "Lz4Decompressor");
-  const char* compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "Lz4Decompressor");
 
   if (compressed_bytes == 0) {
@@ -76,7 +82,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   // Get the output direct buffer
   LOCK_CLASS(env, clazz, "Lz4Decompressor");
-  char* uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "Lz4Decompressor");
 
   if (uncompressed_bytes == 0) {
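
For reference, the prototype forward-declared near the top of this file is the legacy LZ4 decompression entry point: in that API the function returns the number of bytes written to dest, or a negative value if the input is malformed (current LZ4 releases supersede it with LZ4_decompress_safe). A hedged usage sketch against that prototype; decompress_block and its parameters are illustrative, not from the patch:

    #include <stdio.h>

    /* Legacy LZ4 API, as forward-declared in Lz4Decompressor.c.
       Returns the decompressed byte count, or a negative value on
       malformed input. */
    int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
                                         int isize, int maxOutputSize);

    /* Illustrative helper showing the call and its error convention. */
    int decompress_block(const char *src, int src_len,
                         char *dst, int dst_capacity)
    {
        int n = LZ4_uncompress_unknownOutputSize(src, dst,
                                                 src_len, dst_capacity);
        if (n < 0)
            fprintf(stderr, "LZ4 decompression failed (code %d)\n", n);
        return n;   /* >= 0: number of decompressed bytes */
    }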

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c Mon Apr  1 16:47:16 2013
@@ -16,12 +16,18 @@
  * limitations under the License.
  */
 
-#include <dlfcn.h>
+
+#if defined HADOOP_SNAPPY_LIBRARY
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
+#ifdef UNIX
+#include <dlfcn.h>
 #include "config.h"
+#endif // UNIX
+
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
@@ -81,7 +87,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (uncompressed_bytes == 0) {
-    return 0;
+    return (jint)0;
   }
 
   // Get the output direct buffer
@@ -90,7 +96,7 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (compressed_bytes == 0) {
-    return 0;
+    return (jint)0;
   }
 
   /* size_t should always be 4 bytes or larger. */
@@ -109,3 +115,5 @@ JNIEXPORT jint JNICALL Java_org_apache_h
   (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
   return (jint)buf_len;
 }
+
+#endif // defined HADOOP_SNAPPY_LIBRARY
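
The structural change here is that the whole translation unit is now wrapped in #if defined HADOOP_SNAPPY_LIBRARY ... #endif, so the file compiles to an empty object when Snappy support is not built in, and <dlfcn.h> moves under #ifdef UNIX because Windows has no dlfcn. A minimal sketch of the same guard pattern, using a hypothetical HAVE_OPTIONAL_LIB macro in place of HADOOP_SNAPPY_LIBRARY:

    /* optional_feature.c - compiles to an empty object unless the
       build system defines HAVE_OPTIONAL_LIB (hypothetical name). */
    #if defined HAVE_OPTIONAL_LIB

    #include <stdio.h>

    #ifdef UNIX
    #include <dlfcn.h>   /* dlopen/dlsym: UNIX-only header */
    #endif

    void optional_feature_run(void)
    {
        printf("optional feature compiled in\n");
    }

    #endif /* defined HAVE_OPTIONAL_LIB */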

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c Mon Apr  1 16:47:16 2013
@@ -16,12 +16,18 @@
  * limitations under the License.
  */
 
-#include <dlfcn.h>
+
+#if defined HADOOP_SNAPPY_LIBRARY
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
+#ifdef UNIX
 #include "config.h"
+#include <dlfcn.h>
+#endif
+
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
 
@@ -103,3 +109,5 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   return (jint)uncompressed_direct_buf_len;
 }
+
+#endif // defined HADOOP_SNAPPY_LIBRARY

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c Mon Apr  1 16:47:16 2013
@@ -16,12 +16,15 @@
  * limitations under the License.
  */
 
-#include <dlfcn.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
+#ifdef UNIX
+#include <dlfcn.h>
 #include "config.h"
+#endif
+
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h"
 
@@ -35,48 +38,124 @@ static jfieldID ZlibCompressor_directBuf
 static jfieldID ZlibCompressor_finish;
 static jfieldID ZlibCompressor_finished;
 
+#ifdef UNIX
 static int (*dlsym_deflateInit2_)(z_streamp, int, int, int, int, int, const char *, int);
 static int (*dlsym_deflate)(z_streamp, int);
 static int (*dlsym_deflateSetDictionary)(z_streamp, const Bytef *, uInt);
 static int (*dlsym_deflateReset)(z_streamp);
 static int (*dlsym_deflateEnd)(z_streamp);
+#endif
+
+#ifdef WINDOWS
+#include <Strsafe.h>
+typedef int (__cdecl *__dlsym_deflateInit2_) (z_streamp, int, int, int, int, int, const char *, int);
+typedef int (__cdecl *__dlsym_deflate) (z_streamp, int);
+typedef int (__cdecl *__dlsym_deflateSetDictionary) (z_streamp, const Bytef *, uInt);
+typedef int (__cdecl *__dlsym_deflateReset) (z_streamp);
+typedef int (__cdecl *__dlsym_deflateEnd) (z_streamp);
+static __dlsym_deflateInit2_ dlsym_deflateInit2_;
+static __dlsym_deflate dlsym_deflate;
+static __dlsym_deflateSetDictionary dlsym_deflateSetDictionary;
+static __dlsym_deflateReset dlsym_deflateReset;
+static __dlsym_deflateEnd dlsym_deflateEnd;
+
+// Try to load zlib.dll from the dir where hadoop.dll is located.
+HANDLE LoadZlibTryHadoopNativeDir() {
+  HMODULE libz = NULL;
+  PCWSTR HADOOP_DLL = L"hadoop.dll";
+  size_t HADOOP_DLL_LEN = 10;
+  WCHAR path[MAX_PATH] = { 0 };
+  BOOL isPathValid = FALSE;
+
+  // Get hadoop.dll full path
+  HMODULE hModule = GetModuleHandle(HADOOP_DLL);
+  if (hModule != NULL) {
+    if (GetModuleFileName(hModule, path, MAX_PATH) > 0) {
+      size_t size = 0;
+      if (StringCchLength(path, MAX_PATH, &size) == S_OK) {
+
+        // Update path variable to have the full path to the zlib.dll
+        size = size - HADOOP_DLL_LEN;
+        if (size >= 0) {
+          path[size] = L'\0';
+          if (StringCchCat(path, MAX_PATH, HADOOP_ZLIB_LIBRARY) == S_OK) {
+            isPathValid = TRUE;
+          }
+        }
+      }
+    }
+  }
+
+  if (isPathValid) {
+    libz = LoadLibrary(path);
+  }
+
+  // fallback to system paths
+  if (!libz) {
+    libz = LoadLibrary(HADOOP_ZLIB_LIBRARY);
+  }
+
+  return libz;
+}
+#endif
 
 JNIEXPORT void JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_initIDs(
 	JNIEnv *env, jclass class
 	) {
+#ifdef UNIX
 	// Load libz.so
 	void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
-	if (!libz) {
+  if (!libz) {
 		THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load libz.so");
 	  	return;
 	}
+#endif
+
+#ifdef WINDOWS
+  HMODULE libz = LoadZlibTryHadoopNativeDir();
+
+  if (!libz) {
+		THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load zlib1.dll");
+    return;
+	}
+#endif
 
+#ifdef UNIX
 	// Locate the requisite symbols from libz.so
 	dlerror();                                 // Clear any existing error
-	LOAD_DYNAMIC_SYMBOL(dlsym_deflateInit2_, env, libz, "deflateInit2_");
-	LOAD_DYNAMIC_SYMBOL(dlsym_deflate, env, libz, "deflate");
-	LOAD_DYNAMIC_SYMBOL(dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
-	LOAD_DYNAMIC_SYMBOL(dlsym_deflateReset, env, libz, "deflateReset");
-	LOAD_DYNAMIC_SYMBOL(dlsym_deflateEnd, env, libz, "deflateEnd");
+  LOAD_DYNAMIC_SYMBOL(dlsym_deflateInit2_, env, libz, "deflateInit2_");
+  LOAD_DYNAMIC_SYMBOL(dlsym_deflate, env, libz, "deflate");
+  LOAD_DYNAMIC_SYMBOL(dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
+  LOAD_DYNAMIC_SYMBOL(dlsym_deflateReset, env, libz, "deflateReset");
+  LOAD_DYNAMIC_SYMBOL(dlsym_deflateEnd, env, libz, "deflateEnd");
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_deflateInit2_, dlsym_deflateInit2_, env, libz, "deflateInit2_");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_deflate, dlsym_deflate, env, libz, "deflate");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_deflateSetDictionary, dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_deflateReset, dlsym_deflateReset, env, libz, "deflateReset");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_deflateEnd, dlsym_deflateEnd, env, libz, "deflateEnd");
+#endif
 
 	// Initialize the requisite fieldIds
-    ZlibCompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", 
+    ZlibCompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
                                                       "Ljava/lang/Class;");
     ZlibCompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
     ZlibCompressor_finish = (*env)->GetFieldID(env, class, "finish", "Z");
     ZlibCompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
-    ZlibCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, 
-    									"uncompressedDirectBuf", 
+    ZlibCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class,
+        "uncompressedDirectBuf",
     									"Ljava/nio/Buffer;");
-    ZlibCompressor_uncompressedDirectBufOff = (*env)->GetFieldID(env, class, 
+    ZlibCompressor_uncompressedDirectBufOff = (*env)->GetFieldID(env, class,
     										"uncompressedDirectBufOff", "I");
-    ZlibCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, class, 
+    ZlibCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, class,
     										"uncompressedDirectBufLen", "I");
-    ZlibCompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, 
-    									"compressedDirectBuf", 
+    ZlibCompressor_compressedDirectBuf = (*env)->GetFieldID(env, class,
+                      "compressedDirectBuf",
     									"Ljava/nio/Buffer;");
-    ZlibCompressor_directBufferSize = (*env)->GetFieldID(env, class, 
+    ZlibCompressor_directBufferSize = (*env)->GetFieldID(env, class,
     										"directBufferSize", "I");
 }
 
@@ -84,7 +163,9 @@ JNIEXPORT jlong JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_init(
 	JNIEnv *env, jclass class, jint level, jint strategy, jint windowBits
 	) {
-	// Create a z_stream
+    int rv = 0;
+    static const int memLevel = 8; 							// See zconf.h
+	  // Create a z_stream
     z_stream *stream = malloc(sizeof(z_stream));
     if (!stream) {
 		THROW(env, "java/lang/OutOfMemoryError", NULL);
@@ -93,17 +174,16 @@ Java_org_apache_hadoop_io_compress_zlib_
     memset((void*)stream, 0, sizeof(z_stream));
 
 	// Initialize stream
-	static const int memLevel = 8; 							// See zconf.h
-    int rv = (*dlsym_deflateInit2_)(stream, level, Z_DEFLATED, windowBits,
+    rv = (*dlsym_deflateInit2_)(stream, level, Z_DEFLATED, windowBits,
     			memLevel, strategy, ZLIB_VERSION, sizeof(z_stream));
-    			
+
     if (rv != Z_OK) {
 	    // Contingency - Report error by throwing appropriate exceptions
 	    free(stream);
 	    stream = NULL;
-	
+
 		switch (rv) {
-			case Z_MEM_ERROR: 
+			case Z_MEM_ERROR:
 			    {
 		    		THROW(env, "java/lang/OutOfMemoryError", NULL);
 			    }
@@ -120,27 +200,28 @@ Java_org_apache_hadoop_io_compress_zlib_
 		    break;
 	    }
 	}
-	
+
     return JLONG(stream);
 }
 
 JNIEXPORT void JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_setDictionary(
-	JNIEnv *env, jclass class, jlong stream, 
+	JNIEnv *env, jclass class, jlong stream,
 	jarray b, jint off, jint len
 	) {
+    int rv = 0;
     Bytef *buf = (*env)->GetPrimitiveArrayCritical(env, b, 0);
     if (!buf) {
         return;
     }
-    int rv = dlsym_deflateSetDictionary(ZSTREAM(stream), buf + off, len);
+    rv = dlsym_deflateSetDictionary(ZSTREAM(stream), buf + off, len);
     (*env)->ReleasePrimitiveArrayCritical(env, b, buf, 0);
-    
+
     if (rv != Z_OK) {
     	// Contingency - Report error by throwing appropriate exceptions
 	    switch (rv) {
 		    case Z_STREAM_ERROR:
-			{	
+			{
 		    	THROW(env, "java/lang/IllegalArgumentException", NULL);
 			}
 			break;
@@ -157,75 +238,85 @@ JNIEXPORT jint JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_deflateBytesDirect(
 	JNIEnv *env, jobject this
 	) {
+    jobject clazz = NULL;
+    jobject uncompressed_direct_buf = NULL;
+    jint uncompressed_direct_buf_off = 0;
+    jint uncompressed_direct_buf_len = 0;
+    jobject compressed_direct_buf = NULL;
+    jint compressed_direct_buf_len = 0;
+    jboolean finish;
+    Bytef* uncompressed_bytes = NULL;
+    Bytef* compressed_bytes = NULL;
+    int rv = 0;
+    jint no_compressed_bytes = 0;
 	// Get members of ZlibCompressor
     z_stream *stream = ZSTREAM(
-    						(*env)->GetLongField(env, this, 
+                (*env)->GetLongField(env, this,
     									ZlibCompressor_stream)
     					);
     if (!stream) {
 		THROW(env, "java/lang/NullPointerException", NULL);
 		return (jint)0;
-    } 
+    }
 
     // Get members of ZlibCompressor
-    jobject clazz = (*env)->GetStaticObjectField(env, this, 
+    clazz = (*env)->GetStaticObjectField(env, this,
                                                  ZlibCompressor_clazz);
-	jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this, 
+	uncompressed_direct_buf = (*env)->GetObjectField(env, this,
 									ZlibCompressor_uncompressedDirectBuf);
-	jint uncompressed_direct_buf_off = (*env)->GetIntField(env, this, 
+	uncompressed_direct_buf_off = (*env)->GetIntField(env, this,
 									ZlibCompressor_uncompressedDirectBufOff);
-	jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this, 
+	uncompressed_direct_buf_len = (*env)->GetIntField(env, this,
 									ZlibCompressor_uncompressedDirectBufLen);
 
-	jobject compressed_direct_buf = (*env)->GetObjectField(env, this, 
+	compressed_direct_buf = (*env)->GetObjectField(env, this,
 									ZlibCompressor_compressedDirectBuf);
-	jint compressed_direct_buf_len = (*env)->GetIntField(env, this, 
+	compressed_direct_buf_len = (*env)->GetIntField(env, this,
 									ZlibCompressor_directBufferSize);
 
-	jboolean finish = (*env)->GetBooleanField(env, this, ZlibCompressor_finish);
+	finish = (*env)->GetBooleanField(env, this, ZlibCompressor_finish);
 
     // Get the input direct buffer
     LOCK_CLASS(env, clazz, "ZlibCompressor");
-	Bytef* uncompressed_bytes = (*env)->GetDirectBufferAddress(env, 
+    uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
 											uncompressed_direct_buf);
     UNLOCK_CLASS(env, clazz, "ZlibCompressor");
-    
+
   	if (uncompressed_bytes == 0) {
     	return (jint)0;
 	}
-	
+
     // Get the output direct buffer
     LOCK_CLASS(env, clazz, "ZlibCompressor");
-	Bytef* compressed_bytes = (*env)->GetDirectBufferAddress(env, 
+    compressed_bytes = (*env)->GetDirectBufferAddress(env,
 										compressed_direct_buf);
     UNLOCK_CLASS(env, clazz, "ZlibCompressor");
 
   	if (compressed_bytes == 0) {
 		return (jint)0;
 	}
-	
+
 	// Re-calibrate the z_stream
   	stream->next_in = uncompressed_bytes + uncompressed_direct_buf_off;
   	stream->next_out = compressed_bytes;
   	stream->avail_in = uncompressed_direct_buf_len;
-	stream->avail_out = compressed_direct_buf_len;
-	
+    stream->avail_out = compressed_direct_buf_len;
+
 	// Compress
-	int rv = dlsym_deflate(stream, finish ? Z_FINISH : Z_NO_FLUSH);
+	rv = dlsym_deflate(stream, finish ? Z_FINISH : Z_NO_FLUSH);
 
-	jint no_compressed_bytes = 0;
 	switch (rv) {
     	// Contingency? - Report error by throwing appropriate exceptions
   		case Z_STREAM_END:
   		{
   			(*env)->SetBooleanField(env, this, ZlibCompressor_finished, JNI_TRUE);
   		} // cascade
-	  	case Z_OK: 
+      case Z_OK:
 	  	{
 	  		uncompressed_direct_buf_off += uncompressed_direct_buf_len - stream->avail_in;
-			(*env)->SetIntField(env, this, 
+			(*env)->SetIntField(env, this,
 						ZlibCompressor_uncompressedDirectBufOff, uncompressed_direct_buf_off);
-			(*env)->SetIntField(env, this, 
+			(*env)->SetIntField(env, this,
 						ZlibCompressor_uncompressedDirectBufLen, stream->avail_in);
 			no_compressed_bytes = compressed_direct_buf_len - stream->avail_out;
 	  	}
@@ -238,7 +329,7 @@ Java_org_apache_hadoop_io_compress_zlib_
 		}
 		break;
   	}
-  	
+
   	return no_compressed_bytes;
 }
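
Throughout this file the zlib entry points are reached only through the dlsym_* function pointers, so the UNIX path (dlopen/dlsym via the four-argument LOAD_DYNAMIC_SYMBOL) and the new Windows path (LoadZlibTryHadoopNativeDir plus the five-argument variant with __cdecl typedefs) share identical call sites. A standalone sketch of that dual-loading idea against plain libc/Win32, resolving zlib's zlibVersion(); the program below is illustrative, not part of the patch:

    #include <stdio.h>

    #ifdef _WIN32
    #include <windows.h>
    typedef const char * (__cdecl *version_fn)(void);
    #else
    #include <dlfcn.h>           /* link with -ldl on Linux */
    typedef const char * (*version_fn)(void);
    #endif

    int main(void)
    {
        version_fn zlib_version = NULL;
    #ifdef _WIN32
        HMODULE lib = LoadLibrary(TEXT("zlib1.dll"));
        if (lib != NULL)
            zlib_version = (version_fn)GetProcAddress(lib, "zlibVersion");
    #else
        /* "libz.so" may need the versioned name (e.g. libz.so.1) on
           systems without the zlib development package. */
        void *lib = dlopen("libz.so", RTLD_LAZY | RTLD_GLOBAL);
        if (lib != NULL)
            zlib_version = (version_fn)dlsym(lib, "zlibVersion");
    #endif
        if (zlib_version != NULL)
            printf("zlib version: %s\n", zlib_version());
        return zlib_version != NULL ? 0 : 1;
    }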
 

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c Mon Apr  1 16:47:16 2013
@@ -16,12 +16,15 @@
  * limitations under the License.
  */
 
-#include <dlfcn.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
+#ifdef UNIX
+#include <dlfcn.h>
 #include "config.h"
+#endif
+
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h"
 
@@ -35,48 +38,88 @@ static jfieldID ZlibDecompressor_directB
 static jfieldID ZlibDecompressor_needDict;
 static jfieldID ZlibDecompressor_finished;
 
+#ifdef UNIX
 static int (*dlsym_inflateInit2_)(z_streamp, int, const char *, int);
 static int (*dlsym_inflate)(z_streamp, int);
 static int (*dlsym_inflateSetDictionary)(z_streamp, const Bytef *, uInt);
 static int (*dlsym_inflateReset)(z_streamp);
 static int (*dlsym_inflateEnd)(z_streamp);
+#endif
+
+#ifdef WINDOWS
+#include <Strsafe.h>
+typedef int (__cdecl *__dlsym_inflateInit2_)(z_streamp, int, const char *, int);
+typedef int (__cdecl *__dlsym_inflate)(z_streamp, int);
+typedef int (__cdecl *__dlsym_inflateSetDictionary)(z_streamp, const Bytef *, uInt);
+typedef int (__cdecl *__dlsym_inflateReset)(z_streamp);
+typedef int (__cdecl *__dlsym_inflateEnd)(z_streamp);
+static __dlsym_inflateInit2_ dlsym_inflateInit2_;
+static __dlsym_inflate dlsym_inflate;
+static __dlsym_inflateSetDictionary dlsym_inflateSetDictionary;
+static __dlsym_inflateReset dlsym_inflateReset;
+static __dlsym_inflateEnd dlsym_inflateEnd;
+extern HANDLE LoadZlibTryHadoopNativeDir();
+#endif
 
 JNIEXPORT void JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_initIDs(
-	JNIEnv *env, jclass class
+JNIEnv *env, jclass class
 	) {
 	// Load libz.so
-    void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+#ifdef UNIX
+  void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
 	if (!libz) {
 	  THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load libz.so");
 	  return;
-	} 
+	}
+#endif
+
+#ifdef WINDOWS
+  HMODULE libz = LoadZlibTryHadoopNativeDir();
+
+	if (!libz) {
+	  THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load zlib1.dll");
+	  return;
+	}
+#endif
+
 
 	// Locate the requisite symbols from libz.so
+#ifdef UNIX
 	dlerror();                                 // Clear any existing error
 	LOAD_DYNAMIC_SYMBOL(dlsym_inflateInit2_, env, libz, "inflateInit2_");
 	LOAD_DYNAMIC_SYMBOL(dlsym_inflate, env, libz, "inflate");
 	LOAD_DYNAMIC_SYMBOL(dlsym_inflateSetDictionary, env, libz, "inflateSetDictionary");
 	LOAD_DYNAMIC_SYMBOL(dlsym_inflateReset, env, libz, "inflateReset");
 	LOAD_DYNAMIC_SYMBOL(dlsym_inflateEnd, env, libz, "inflateEnd");
+#endif
+
+#ifdef WINDOWS
+	LOAD_DYNAMIC_SYMBOL(__dlsym_inflateInit2_, dlsym_inflateInit2_, env, libz, "inflateInit2_");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_inflate, dlsym_inflate, env, libz, "inflate");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_inflateSetDictionary, dlsym_inflateSetDictionary, env, libz, "inflateSetDictionary");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_inflateReset, dlsym_inflateReset, env, libz, "inflateReset");
+	LOAD_DYNAMIC_SYMBOL(__dlsym_inflateEnd, dlsym_inflateEnd, env, libz, "inflateEnd");
+#endif
+
 
-	// Initialize the requisite fieldIds
-    ZlibDecompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", 
+  // Initialize the requisite fieldIds
+    ZlibDecompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
                                                       "Ljava/lang/Class;");
     ZlibDecompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
     ZlibDecompressor_needDict = (*env)->GetFieldID(env, class, "needDict", "Z");
     ZlibDecompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
-    ZlibDecompressor_compressedDirectBuf = (*env)->GetFieldID(env, class, 
-    											"compressedDirectBuf", 
+    ZlibDecompressor_compressedDirectBuf = (*env)->GetFieldID(env, class,
+                          "compressedDirectBuf",
     											"Ljava/nio/Buffer;");
-    ZlibDecompressor_compressedDirectBufOff = (*env)->GetFieldID(env, class, 
+    ZlibDecompressor_compressedDirectBufOff = (*env)->GetFieldID(env, class,
     										"compressedDirectBufOff", "I");
-    ZlibDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env, class, 
+    ZlibDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env, class,
     										"compressedDirectBufLen", "I");
-    ZlibDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class, 
-    											"uncompressedDirectBuf", 
+    ZlibDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class,
+                          "uncompressedDirectBuf",
     											"Ljava/nio/Buffer;");
-    ZlibDecompressor_directBufferSize = (*env)->GetFieldID(env, class, 
+    ZlibDecompressor_directBufferSize = (*env)->GetFieldID(env, class,
     											"directBufferSize", "I");
 }
 
@@ -84,21 +127,22 @@ JNIEXPORT jlong JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_init(
 	JNIEnv *env, jclass cls, jint windowBits
 	) {
+    int rv = 0;
     z_stream *stream = malloc(sizeof(z_stream));
     memset((void*)stream, 0, sizeof(z_stream));
 
     if (stream == 0) {
 		THROW(env, "java/lang/OutOfMemoryError", NULL);
 		return (jlong)0;
-    } 
-    
-    int rv = dlsym_inflateInit2_(stream, windowBits, ZLIB_VERSION, sizeof(z_stream));
+    }
+
+    rv = dlsym_inflateInit2_(stream, windowBits, ZLIB_VERSION, sizeof(z_stream));
 
 	if (rv != Z_OK) {
 	    // Contingency - Report error by throwing appropriate exceptions
 		free(stream);
 		stream = NULL;
-		
+
 		switch (rv) {
 		 	case Z_MEM_ERROR:
 		 	{
@@ -112,7 +156,7 @@ Java_org_apache_hadoop_io_compress_zlib_
 	  		break;
 		}
 	}
-	
+
 	return JLONG(stream);
 }
 
@@ -121,21 +165,22 @@ Java_org_apache_hadoop_io_compress_zlib_
 	JNIEnv *env, jclass cls, jlong stream,
 	jarray b, jint off, jint len
 	) {
+    int rv = 0;
     Bytef *buf = (*env)->GetPrimitiveArrayCritical(env, b, 0);
     if (!buf) {
 		THROW(env, "java/lang/InternalError", NULL);
         return;
     }
-    int rv = dlsym_inflateSetDictionary(ZSTREAM(stream), buf + off, len);
+    rv = dlsym_inflateSetDictionary(ZSTREAM(stream), buf + off, len);
     (*env)->ReleasePrimitiveArrayCritical(env, b, buf, 0);
-    
+
     if (rv != Z_OK) {
 	    // Contingency - Report error by throwing appropriate exceptions
 		switch (rv) {
 		    case Z_STREAM_ERROR:
 	    	case Z_DATA_ERROR:
 			{
-				THROW(env, "java/lang/IllegalArgumentException", 
+				THROW(env, "java/lang/IllegalArgumentException",
 					(ZSTREAM(stream))->msg);
 			}
 			break;
@@ -152,62 +197,71 @@ JNIEXPORT jint JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
 	JNIEnv *env, jobject this
 	) {
+    jobject clazz = NULL;
+    jarray compressed_direct_buf = NULL;
+    jint compressed_direct_buf_off = 0;
+    jint compressed_direct_buf_len = 0;
+    jarray uncompressed_direct_buf = NULL;
+    jint uncompressed_direct_buf_len = 0;
+    Bytef *compressed_bytes = NULL;
+    Bytef *uncompressed_bytes = NULL;
+    int rv = 0;
+    int no_decompressed_bytes = 0;
 	// Get members of ZlibDecompressor
     z_stream *stream = ZSTREAM(
-    						(*env)->GetLongField(env, this, 
+                (*env)->GetLongField(env, this,
     									ZlibDecompressor_stream)
     					);
     if (!stream) {
 		THROW(env, "java/lang/NullPointerException", NULL);
 		return (jint)0;
-    } 
+    }
 
     // Get members of ZlibDecompressor
-    jobject clazz = (*env)->GetStaticObjectField(env, this, 
+    clazz = (*env)->GetStaticObjectField(env, this,
                                                  ZlibDecompressor_clazz);
-	jarray compressed_direct_buf = (jarray)(*env)->GetObjectField(env, this, 
+	compressed_direct_buf = (jarray)(*env)->GetObjectField(env, this,
 											ZlibDecompressor_compressedDirectBuf);
-	jint compressed_direct_buf_off = (*env)->GetIntField(env, this, 
+	compressed_direct_buf_off = (*env)->GetIntField(env, this,
 									ZlibDecompressor_compressedDirectBufOff);
-	jint compressed_direct_buf_len = (*env)->GetIntField(env, this, 
+	compressed_direct_buf_len = (*env)->GetIntField(env, this,
 									ZlibDecompressor_compressedDirectBufLen);
 
-	jarray uncompressed_direct_buf = (jarray)(*env)->GetObjectField(env, this, 
+	uncompressed_direct_buf = (jarray)(*env)->GetObjectField(env, this,
 											ZlibDecompressor_uncompressedDirectBuf);
-	jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this, 
+	uncompressed_direct_buf_len = (*env)->GetIntField(env, this,
 										ZlibDecompressor_directBufferSize);
 
     // Get the input direct buffer
     LOCK_CLASS(env, clazz, "ZlibDecompressor");
-	Bytef *compressed_bytes = (*env)->GetDirectBufferAddress(env, 
+	compressed_bytes = (*env)->GetDirectBufferAddress(env,
 										compressed_direct_buf);
     UNLOCK_CLASS(env, clazz, "ZlibDecompressor");
-    
+
 	if (!compressed_bytes) {
 	    return (jint)0;
 	}
-	
+
     // Get the output direct buffer
     LOCK_CLASS(env, clazz, "ZlibDecompressor");
-	Bytef *uncompressed_bytes = (*env)->GetDirectBufferAddress(env, 
+	uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
 											uncompressed_direct_buf);
     UNLOCK_CLASS(env, clazz, "ZlibDecompressor");
 
 	if (!uncompressed_bytes) {
 	    return (jint)0;
 	}
-	
+
 	// Re-calibrate the z_stream
 	stream->next_in  = compressed_bytes + compressed_direct_buf_off;
 	stream->next_out = uncompressed_bytes;
 	stream->avail_in  = compressed_direct_buf_len;
 	stream->avail_out = uncompressed_direct_buf_len;
-	
+
 	// Decompress
-	int rv = dlsym_inflate(stream, Z_PARTIAL_FLUSH);
+	rv = dlsym_inflate(stream, Z_PARTIAL_FLUSH);
 
 	// Contingency? - Report error by throwing appropriate exceptions
-	int no_decompressed_bytes = 0;	
 	switch (rv) {
 		case Z_STREAM_END:
 		{
@@ -216,9 +270,9 @@ Java_org_apache_hadoop_io_compress_zlib_
 		case Z_OK:
 		{
 		    compressed_direct_buf_off += compressed_direct_buf_len - stream->avail_in;
-		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff, 
+		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff,
 		    			compressed_direct_buf_off);
-		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen, 
+		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen,
 		    			stream->avail_in);
 		    no_decompressed_bytes = uncompressed_direct_buf_len - stream->avail_out;
 		}
@@ -227,9 +281,9 @@ Java_org_apache_hadoop_io_compress_zlib_
 		{
 		    (*env)->SetBooleanField(env, this, ZlibDecompressor_needDict, JNI_TRUE);
 		    compressed_direct_buf_off += compressed_direct_buf_len - stream->avail_in;
-		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff, 
+		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff,
 		    			compressed_direct_buf_off);
-		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen, 
+		    (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen,
 		    			stream->avail_in);
 		}
 		break;
@@ -251,7 +305,7 @@ Java_org_apache_hadoop_io_compress_zlib_
 		}
 		break;
     }
-    
+
     return no_decompressed_bytes;
 }
 
@@ -299,4 +353,3 @@ Java_org_apache_hadoop_io_compress_zlib_
 /**
  * vim: sw=2: ts=2: et:
  */
-
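
The decompress path above keeps the same shape on both platforms: re-point the z_stream at the direct buffers, make one inflate call with Z_PARTIAL_FLUSH, then read the consumed and produced byte counts back out of avail_in/avail_out. A sketch of that pattern against the stock zlib API, without the dlsym_ indirection; inflate_once and its parameters are illustrative:

    #include <zlib.h>

    /* One inflate step: returns zlib's status code and reports how
       many input bytes were consumed and output bytes produced. */
    int inflate_once(z_stream *strm,
                     unsigned char *in, unsigned in_len,
                     unsigned char *out, unsigned out_len,
                     unsigned *consumed, unsigned *produced)
    {
        int rv;

        strm->next_in   = in;
        strm->avail_in  = in_len;
        strm->next_out  = out;
        strm->avail_out = out_len;

        rv = inflate(strm, Z_PARTIAL_FLUSH);

        *consumed = in_len  - strm->avail_in;   /* input bytes used    */
        *produced = out_len - strm->avail_out;  /* output bytes filled */
        return rv;   /* Z_OK, Z_STREAM_END, Z_NEED_DICT, or an error */
    }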


