hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r588341 - in /lucene/hadoop/trunk: ./ src/java/org/apache/hadoop/filecache/ src/java/org/apache/hadoop/mapred/ src/java/org/apache/hadoop/util/ src/test/org/apache/hadoop/util/ src/test/resources/ src/test/resources/jarutils/ src/test/resou...
Date Thu, 25 Oct 2007 20:30:57 GMT
Author: cutting
Date: Thu Oct 25 13:30:55 2007
New Revision: 588341

URL: http://svn.apache.org/viewvc?rev=588341&view=rev
Log:
HADOOP-1622.  Permit multiple jars to be added to a job.  Contributed by Dennis Kubes.

Added:
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/JarUtils.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestJarUtils.java
    lucene/hadoop/trunk/src/test/resources/
    lucene/hadoop/trunk/src/test/resources/jarutils/
    lucene/hadoop/trunk/src/test/resources/jarutils/dir1/
    lucene/hadoop/trunk/src/test/resources/jarutils/dir1/file1
    lucene/hadoop/trunk/src/test/resources/jarutils/dir2/
    lucene/hadoop/trunk/src/test/resources/jarutils/dir2/dir2-1/
    lucene/hadoop/trunk/src/test/resources/jarutils/dir2/dir2-1/file2-1
    lucene/hadoop/trunk/src/test/resources/jarutils/dir2/file2
    lucene/hadoop/trunk/src/test/resources/jarutils/dir3/
    lucene/hadoop/trunk/src/test/resources/jarutils/dir3/file3
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/build.xml
    lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobConf.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=588341&r1=588340&r2=588341&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Thu Oct 25 13:30:55 2007
@@ -132,6 +132,9 @@
     HADOOP-1968. FileSystem supports wildcard input syntax "{ }".
     (Hairong Kuang via dhruba)
 
+    HADOOP-1622.  Permit multiple jar files to be added to a job.
+    (Dennis Kubes via cutting)
+
   OPTIMIZATIONS
 
     HADOOP-1910.  Reduce the number of RPCs that DistributedFileSystem.create()

Modified: lucene/hadoop/trunk/build.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/build.xml?rev=588341&r1=588340&r2=588341&view=diff
==============================================================================
--- lucene/hadoop/trunk/build.xml (original)
+++ lucene/hadoop/trunk/build.xml Thu Oct 25 13:30:55 2007
@@ -57,8 +57,11 @@
             value="${build.dir}/c++-examples/${build.platform}"/>
 
   <property name="test.src.dir" value="${basedir}/src/test"/>
+  <property name="test.resources.dir" value="${test.src.dir}/resources" />
+  <property name="test.src.dir" value="${basedir}/src/test"/>
   <property name="test.build.dir" value="${build.dir}/test"/>
   <property name="test.generated.dir" value="${test.build.dir}/src"/>
+  <property name="test.build.resources" value="${test.build.dir}/resources"/>
   <property name="test.build.data" value="${test.build.dir}/data"/>
   <property name="test.cache.data" value="${test.build.dir}/cache"/>
   <property name="test.log.dir" value="${test.build.dir}/logs"/>
@@ -135,6 +138,7 @@
   <target name="init">
     <mkdir dir="${build.dir}"/>
     <mkdir dir="${build.classes}"/>
+    <mkdir dir="${test.build.resources}" />
     <mkdir dir="${build.src}"/>
     <mkdir dir="${build.webapps}/task/WEB-INF"/>
     <mkdir dir="${build.webapps}/job/WEB-INF"/>
@@ -467,6 +471,10 @@
     <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.zip" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/org/apache/hadoop/dfs/hadoop-12-dfs-dir.tgz" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/org/apache/hadoop/dfs/hadoop-12-dfs-dir.txt" todir="${test.cache.data}"/>
+    <copy todir="${test.build.resources}">
+      <fileset dir="${test.resources.dir}"/>
+    </copy>
+    
   </target>
 
   <!-- ================================================================== -->
@@ -482,6 +490,7 @@
            fork="yes" maxmemory="256m" dir="${basedir}" timeout="${test.timeout}"
       errorProperty="tests.failed" failureProperty="tests.failed">
       <sysproperty key="test.build.data" value="${test.build.data}"/>
+      <sysproperty key="test.build.resources" value="${test.build.resources}"/>
       <sysproperty key="test.cache.data" value="${test.cache.data}"/>    	
       <sysproperty key="hadoop.log.dir" value="${test.log.dir}"/>
       <sysproperty key="test.src.dir" value="${test.src.dir}"/>

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java?rev=588341&r1=588340&r2=588341&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java Thu Oct 25 13:30:55 2007
@@ -288,7 +288,7 @@
       if (isArchive) {
         String tmpArchive = parchive.toString().toLowerCase();
         if (tmpArchive.endsWith(".jar")) {
-          RunJar.unJar(new File(parchive.toString()), new File(parchive
+          JarUtils.unJar(new File(parchive.toString()), new File(parchive
                                                                .getParent().toString()));
         } else if (tmpArchive.endsWith(".zip")) {
           FileUtil.unZip(new File(parchive.toString()), new File(parchive

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobClient.java?rev=588341&r1=588340&r2=588341&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobClient.java Thu Oct 25 13:30:55 2007
@@ -21,6 +21,7 @@
 import java.io.BufferedWriter;
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -31,9 +32,11 @@
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
@@ -44,6 +47,7 @@
 import org.apache.hadoop.filecache.DistributedCache;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -56,6 +60,7 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapred.TaskInProgress;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.JarUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -424,8 +429,115 @@
     JobConf job = new JobConf(jobFile);
     return submitJob(job);
   }
+
+
+  /**
+   * Creates a mapreduce job jar file from all of the mapreduce job resources.
+   * 
+   * @param job The current job configuration.
+   * @param jobId The current job id.
+   * 
+   * @return The file path to the finished job jar.
+   * @throws IOException If an error occurs while creating the job jar file.
+   */
+  private File createJobJar(JobConf job, String jobId) 
+    throws IOException {
     
-   
+    // get both jar and jars as they can be set through either config
+    String[] resources = job.getJobResources();
+    boolean hasResources = (resources != null && resources.length > 0);   
+    File jobJarFile = null;
+    
+    // check for either a single or multiple jars
+    if (hasResources) {
+
+      // allow resources to be found through the classpath, absolute path, 
+      // a directory or through a containing jar file
+      List<File> jarList = new ArrayList<File>();
+      for (int i = 0; i < resources.length; i++) {
+        
+        // get the current resource
+        String current = resources[i];
+        if (current != null && current.length() > 0) {
+          
+          // create a file from the current resource and see if it exists
+          File currentFile = new File(current);
+          boolean exists = currentFile.exists();
+          
+          // if the resource is not an absolute path to a file
+          if (!exists) {
+
+            // try converting it to a classname
+            try {
+              
+              // try to find the containing jar on the classpath
+              Class cls = Class.forName(current);
+              String jar = JarUtils.findContainingJar(cls);
+              if (jar != null) {
+                currentFile = new File(jar);
+                if (currentFile.exists()) {
+                  jarList.add(new File(jar));
+                  continue;
+                }
+              }
+            }
+            catch (ClassNotFoundException e) {
+              // do nothing, not a classname
+            }
+ 
+            // try to find a resource on the classpath that matches, should be
+            // a jar but will technically find any matching resource
+            String jar = JarUtils.findJar(getClass(), current);
+            if (jar != null) {
+              currentFile = new File(jar);
+              if (currentFile.exists()) {
+                jarList.add(new File(jar));
+              }
+            }
+
+          }
+          else if (exists) {            
+            // the resource is an existing file or directory
+            jarList.add(new File(current));
+          }
+        }
+      }
+      
+      // get the list of final resources
+      int numResources = jarList.size();
+      File[] jarResources = ((numResources == 0) ? new File[0] : 
+        (File[])jarList.toArray(new File[jarList.size()]));
+      
+      // see if we are dealing with a single jar file
+      boolean hasSingleJar = false;
+      if (numResources == 1) {
+        File testJar =jarResources[0];
+        if (testJar.exists() && testJar.isFile()) {
+          hasSingleJar = true;
+          jobJarFile = testJar;
+        }
+      }
+      
+      // we only jar up if there is more than one resource or if there is a 
+      // single resource but it is not a jar file (i.e. it is a directory)
+      if (numResources > 1 || (numResources == 1 && !hasSingleJar) ){
+ 
+        // create a jartmp directory in the hadoop.tmp.dir
+        File tmpDir = new File(job.get("hadoop.tmp.dir"));
+
+        // create a complete job jar file from the unjar directory contents
+        // in the system temp directory and delete on exit
+        jobJarFile = FileUtil.createLocalTempFile(tmpDir, jobId + ".job.", true);
+        boolean uncompress = getConf().getBoolean("mapred.job.resources.uncompress", 
+          true);
+        JarUtils.jarAll(jarResources, jobJarFile, uncompress);
+      }
+    }
+    
+    // return the finished job jar file path
+    return jobJarFile;
+  }
+  
   /**
    * Submit a job to the MR system.
    * This returns a handle to the {@link RunningJob} which can be used to track
@@ -480,23 +592,29 @@
       }
       DistributedCache.setFileTimestamps(job, fileTimestamps.toString());
     }
-       
-    String originalJarPath = job.getJar();
-    short replication = (short)job.getInt("mapred.submit.replication", 10);
 
-    if (originalJarPath != null) {           // copy jar to JobTracker's fs
-      // use jar name if job is not named. 
-      if ("".equals(job.getJobName())){
-        job.setJobName(new Path(originalJarPath).getName());
+    // create the job jar file from all job jars
+    File jobJarFile = createJobJar(job, jobId);
+    short replication = (short)job.getInt("mapred.submit.replication", 10);
+    
+    // if we have a job jar file
+    if (jobJarFile != null) {
+    
+      // set the job name to the job jar file name if no name is set
+      if ("".equals(job.getJobName())){        
+        String jobName = jobJarFile.getName();
+        job.setJobName(new Path(jobName).getName());
       }
-      job.setJar(submitJarFile.toString());
-      fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile);
+      
+      // copy the merged job jar to the job filesystem and set replication
+      job.setJar(submitJarFile.toString());      
+      fs.copyFromLocalFile(new Path(jobJarFile.toString()), submitJarFile);
       fs.setReplication(submitJarFile, replication);
     } else {
       LOG.warn("No job jar file set.  User classes may not be found. "+
                "See JobConf(Class) or JobConf#setJar(String).");
     }
-
+    
     // Set the user's name and working directory
     String user = System.getProperty("user.name");
     job.setUser(user != null ? user : "Dr Who");
@@ -954,7 +1072,7 @@
   }
     
   public int run(String[] argv) throws Exception {
-    // process arguments
+
     String submitJobFile = null;
     String jobid = null;
     String taskid = null;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobConf.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobConf.java?rev=588341&r1=588340&r2=588341&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobConf.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobConf.java Thu Oct 25 13:30:55 2007
@@ -21,12 +21,14 @@
 
 import java.io.IOException;
 
+import java.net.URL;
 import java.util.StringTokenizer;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Enumeration;
+import java.util.LinkedHashSet;
+import java.util.Set;
 
-import java.net.URL;
 import java.net.URLDecoder;
 
 import org.apache.commons.logging.Log;
@@ -37,13 +39,18 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 
-import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.compress.CompressionCodec;
 
 import org.apache.hadoop.mapred.lib.IdentityMapper;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.mapred.lib.HashPartitioner;
+import org.apache.hadoop.util.JarUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
 
@@ -164,6 +171,36 @@
   }
 
   /**
+   * <p>Returns an array of the unique resources for this map reduce job.  These
+   * are resources added with the {@link #addJobResource(String)} method and are
+   * not default or final configuration resources.
+   * 
+   * @return String[] An array of unique job resources in priority order.
+   */
+  public String[] getJobResources() {
+    
+    // get the various jar config settings
+    String jar = get("mapred.jar");
+    String resources = get("mapred.job.resources");
+    
+    // add additional resources first, followed by setJar
+    Set<String> allResources = new LinkedHashSet<String>();
+    if (resources != null && resources.length() > 0) {
+      String[] resAr = resources.split(",");
+      for (int i = 0; i < resAr.length; i++) {
+        allResources.add(resAr[i].trim());
+      }
+    }
+    if (jar != null && jar.length() > 0) {
+      allResources.add(jar);
+    }
+    
+    // return as a string array
+    return (String[])allResources.toArray(new String[allResources.size()]);
+  }
+
+
+  /**
    * Checks if <b>mapred-default.xml</b> is on the CLASSPATH, if so
    * it warns the user and loads it as a {@link Configuration} resource.
    * 
@@ -179,9 +216,42 @@
   }
   
   /**
+   * <p>Adds an additional resource to the mapreduce job.  A resource can be
+   * a file on the local filesystem, the name of a class contained in a jar in 
+   * the classpath, a jar that is on the classpath, or a directory on the local 
+   * file system.</p> 
+   * 
+   * <p>When a job is submitted to the MR system, all resources are merged into
+   * a single job.jar file.  Each resource takes priority over any previously
+   * added resources.  If there are any conflicts, resources added later will
+   * overwrite resources added earlier.</p>
+   * 
+   * <p>One thing to note is that empty directories inside of resource jars will 
+   * not be copied over to the merged job jar.</p>
+   * 
+   * @param resource The resource to be added to the mapreduce job.
+   */
+  public void addJobResource(String resource) {
+    
+    if (resource != null && resource.length() > 0) {
+      
+      String resources = get("mapred.job.resources");
+      if (resources != null && resources.length() > 0) {
+        resources = (resource + "," + resources);
+      }
+      else {
+        resources = resource;
+      }
+      
+      set("mapred.job.resources", resources);
+    }
+  }
+
+  /**
    * Get the user jar for the map-reduce job.
    * 
    * @return the user jar for the map-reduce job.
+   * @deprecated Use {@link #getJobResources()} instead.
    */
   public String getJar() { return get("mapred.jar"); }
   
@@ -189,6 +259,7 @@
    * Set the user jar for the map-reduce job.
    * 
    * @param jar the user jar for the map-reduce job.
+   * @deprecated Use {@link #addJobResource(String)} instead.
    */
   public void setJar(String jar) { set("mapred.jar", jar); }
   
@@ -198,9 +269,9 @@
    * @param cls the example class.
    */
   public void setJarByClass(Class cls) {
-    String jar = findContainingJar(cls);
+    String jar = JarUtils.findContainingJar(cls);
     if (jar != null) {
-      setJar(jar);
+      addJobResource(cls.toString());
     }   
   }
 
@@ -1212,6 +1283,5 @@
     }
     return null;
   }
-
 }
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java?rev=588341&r1=588340&r2=588341&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java Thu Oct 25 13:30:55 2007
@@ -64,6 +64,7 @@
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.JarUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.RunJar;
 import org.apache.hadoop.util.StringUtils;
@@ -604,7 +605,7 @@
               throw new IOException("Mkdirs failed to create " + workDir.toString());
             }
           }
-          RunJar.unJar(new File(localJarFile.toString()), workDir);
+          JarUtils.unJar(new File(localJarFile.toString()), workDir);
         }
         rjob.keepJobFiles = ((localJobConf.getKeepTaskFilesPattern() != null) ||
                              localJobConf.getKeepFailedTaskFiles());

Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/JarUtils.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/JarUtils.java?rev=588341&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/JarUtils.java (added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/JarUtils.java Thu Oct 25 13:30:55 2007
@@ -0,0 +1,543 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.jar.JarEntry;
+import java.util.jar.JarFile;
+import java.util.jar.JarInputStream;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class JarUtils {
+
+  private JarUtils() {}                           // no public ctor
+
+  private static final Log LOG = LogFactory.getLog(JarUtils.class);
+
+  /**
+   * <p>Recursive method that gets a listing of all files inside of a directory.
+   * This is used by the {@link #jar(File, File)} method to get all files that
+   * need to be included in the jar.</p>
+   * 
+   * @param dir The input directory in which to find files.
+   * @param toJarList A listing of all files found.
+   * 
+   * @return File[] An array of files to be jared.
+   */
+  private static File[] getToBeJared(File dir, List <File> toJarList) {
+
+    // look through the children of the directory
+    File[] children = dir.listFiles();
+    for (int i = 0; i < children.length; i++) {
+
+      // add files that exist to the jar list, if directory then recurse
+      File child = children[i];
+      if (child != null && child.exists()) {
+        if (!child.isDirectory()) {
+          toJarList.add(child);
+        } else {
+          getToBeJared(child, toJarList);
+        }
+      }
+    }
+
+    return toJarList.toArray(new File[toJarList.size()]);
+  }
+
+  /**
+   * <p>Rudimentary check for whether a file is a zip or jar file.  Jar and zip 
+   * files share the same magic number, beginning with the bytes 504b.  This 
+   * method simply reads the first 2 bytes of the file passed and checks them 
+   * against the magic number.</p>
+   * 
+   * @param file The file to check.
+   * @return True if the first 2 bytes of the file match 504b.
+   * 
+   * @throws IOException If there is an error reading the file.
+   */
+  public static boolean isJarOrZipFile(File file)
+    throws IOException {
+
+    // read the first 5 bytes of the file and close the stream
+    FileInputStream fis = new FileInputStream(file);
+    byte[] buffer = new byte[2];
+    try {      
+      if (fis.read(buffer) != 2)
+        return false;
+    }
+    finally {
+      fis.close();
+    }
+
+    // check it against the magic number 504b
+    boolean isJarOrZip = (buffer[0] == (byte)0x50 && buffer[1] == (byte)0x4b);
+
+    return isJarOrZip;
+  }
+
+  /**
+   * <p>Returns the path the resource will use within the jar file.  This is
+   * the path from the parent directory being jared to the resource itself.
+   * We use this path to ensure that resources are unjared into the correct
+   * directory structure.</p>
+   * 
+   * @param dirToJar The directory being jared.
+   * @param currentResource The current resource.
+   * 
+   * @return String The path to use in the jar file for the resource.
+   */
+  public static String getJarPath(File dirToJar, File currentResource) {
+
+    // find the path from the resource up recursively to the directory
+    // being jared
+    String currentPath = null;
+    while (!currentResource.toString().equals(dirToJar.toString())) {
+      currentPath = currentResource.getName()
+        + (currentPath == null ? "" : File.separator + currentPath);
+      currentResource = currentResource.getParentFile();
+    }
+
+    return currentPath;
+  }
+
+  /**
+   * Copies the contents of one jar to a jar output stream.
+   * 
+   * @param jarFile The input jar.
+   * @param jarOut The output jar.
+   * 
+   * @throws IOException If an error occurs while copying jar contents.
+   */
+  public static void copyJarContents(File jarFile, JarOutputStream jarOut)
+    throws IOException {
+
+    // get the input stream to read the input jar
+    FileInputStream in = new FileInputStream(jarFile);
+    byte buffer[] = new byte[1024];
+    JarInputStream jarIn = new JarInputStream(in);
+    JarEntry current = null;
+
+    // loop through the jar entries in the input jar
+    while ((current = jarIn.getNextJarEntry()) != null) {
+
+      // put each entry in the output jar stream
+      jarOut.putNextEntry(current);
+      long numBytes = current.getSize();
+      int totalRead = 0;
+
+      // copy the contents over to the stream
+      while (totalRead <= numBytes) {
+        int numRead = jarIn.read(buffer, 0, buffer.length);
+        if (numRead <= 0) {
+          break;
+        }
+        else {
+          totalRead += numRead;
+        }
+        jarOut.write(buffer, 0, numRead);
+      }
+    }
+
+    // close the input jar stream
+    jarIn.close();
+    in.close();
+  }
+
+  /**
+   * <p>Creates an output jar with all of the file resources passed.  Resources
+   * can be both files and directories.  If the resources is a directory then 
+   * the entire directory and all of its contents are copied into the new jar.
+   * </p>
+   * 
+   * <p>All resources are copied from their original locations so there is no
+   * need for any temporary directory.  This also has much better performance 
+   * than copying resources to a temporary directory and then jaring.</p>
+   * 
+   * <p>The uncompress option specifies whether jar and zip resources should
+   * be uncompressed into the new jar file or should be included as whole file
+   * units in the new jar file.
+   */
+  public static void jarAll(File[] resources, File outputJar, boolean uncompress)
+    throws IOException {
+
+    // if we have resources to jar up
+    if (resources != null && resources.length > 0) {
+
+      // create a jar file and a manifest to keep track of what we are adding
+      FileOutputStream stream = new FileOutputStream(outputJar);
+      JarOutputStream jarOut = new JarOutputStream(stream, new Manifest());
+      Set <String> jared = new HashSet <String>();
+
+      for (int i = 0; i < resources.length; i++) {
+
+        // if we have an existing resource
+        File curRes = resources[i];
+        if (curRes != null && curRes.exists()) {
+
+          if (curRes.isFile()) {
+
+            // see if the file is a compressed file or not, assumes .jar and 
+            // .zip are compressed to save time
+            String resName = curRes.getName();
+            boolean isJarOrZip = resName.endsWith(".jar")
+              || resName.endsWith(".zip") || isJarOrZipFile(curRes);
+
+            // if we are uncompressing jars and the resource is a jar or zip
+            if (uncompress && isJarOrZip) {
+
+              // copy the contents of the jar directly to the output jar
+              // this avoids having to make a temp directory, we don't use
+              // the utility method because we want to filter what we copy
+              // and not overwrite resources
+              FileInputStream in = new FileInputStream(curRes);
+              byte buffer[] = new byte[1024];
+              JarFile jarFile = new JarFile(curRes);
+              Enumeration <JarEntry> entries = jarFile.entries();
+
+              // loop through the entries in the jar to copy
+              while (entries.hasMoreElements()) {
+
+                // see if the entry already exists in our output jar
+                JarEntry entry = entries.nextElement();
+                String entryName = entry.getName();
+                boolean exists = jared.contains(entryName);
+
+                // we don't copy over anything that already exists (i.e. no 
+                // overwriting), we also don't copy over empty directories as 
+                // any directory will be created with the resources they contain
+                // and finally we don't copy over manifest files
+                if (!exists && !entry.isDirectory()
+                  && !entryName.equals(jarFile.MANIFEST_NAME)) {
+
+                  InputStream entryIn = jarFile.getInputStream(entry);
+                  jarOut.putNextEntry(entry);
+                  int totalRead = 0;
+
+                  int numRead = 0;
+                  while ((numRead = entryIn.read(buffer, 0, buffer.length)) != -1) {
+                    totalRead += numRead;
+                    jarOut.write(buffer, 0, numRead);
+                  }
+
+                  // log the entry added and close the entry input stream
+                  LOG.info("Adding " + entry + ":" + totalRead + " to "
+                    + outputJar.getName() + ".");
+                  entryIn.close();
+
+                  // add the entry paths
+                  jared.add(entryName);
+                }
+              }
+
+              // close the input jar
+              jarFile.close();
+              in.close();
+            }
+            else {
+
+              // we are not uncompressing jar resources or the resource is not
+              // a compressed file, we just copy over to output jar.  
+              String resourceName = curRes.getName();
+              boolean exists = jared.contains(resourceName);
+
+              // if the resource already exists, then we don't overwrite
+              if (!exists) {
+
+                // add the jar entry
+                JarEntry jarAdd = new JarEntry(resourceName);
+                jarAdd.setTime(curRes.lastModified());
+                jarOut.putNextEntry(jarAdd);
+
+                LOG.info("Adding " + resourceName + ":" + curRes.length()
+                  + " to " + outputJar.getName() + ".");
+
+                // create a stream to copy the resource
+                FileInputStream in = new FileInputStream(curRes);
+                byte buffer[] = new byte[1024];
+                long numBytes = curRes.length();
+                int totalRead = 0;
+
+                // copy the resource to the output stream
+                while (totalRead <= numBytes) {
+                  int numRead = in.read(buffer, 0, buffer.length);
+                  if (numRead <= 0) {
+                    break;
+                  }
+                  else {
+                    totalRead += numRead;
+                  }
+                  jarOut.write(buffer, 0, numRead);
+                }
+
+                // flush the current jar output stream
+                jarOut.flush();
+
+                // add the entry paths
+                jared.add(resourceName);
+
+                // close the copier stream
+                in.close();
+              }
+            }
+          }
+          else if (curRes.isDirectory()) {
+
+            // get a listing of the files to be jared
+            File[] toBeJared = getToBeJared(curRes, new ArrayList <File>());
+            byte buffer[] = new byte[1024];
+
+            // loop through the files
+            for (int k = 0; k < toBeJared.length; k++) {
+
+              // for each entry create the correct jar path name
+              File current = toBeJared[k];
+              String currentPath = getJarPath(curRes, current);
+              boolean exists = jared.contains(currentPath);
+
+              // don't overwrite resources
+              if (!exists) {
+
+                // Add entry to the jar
+                JarEntry jarAdd = new JarEntry(currentPath);
+                jarAdd.setTime(toBeJared[k].lastModified());
+                jarOut.putNextEntry(jarAdd);
+
+                LOG.info("Adding " + currentPath + ":" + curRes.length()
+                  + " to " + outputJar.getName() + ".");
+
+                // create a stream to copy the file
+                FileInputStream in = new FileInputStream(toBeJared[k]);
+                while (true) {
+                  int numRead = in.read(buffer, 0, buffer.length);
+                  if (numRead <= 0) {
+                    break;
+                  }
+                  jarOut.write(buffer, 0, numRead);
+                }
+
+                // add the entry paths
+                jared.add(currentPath);
+
+                // close the copier stream
+                in.close();
+              }
+            }
+          } // end file or directory
+        }
+      } // end resources loop
+
+      // close the output jar streams
+      jarOut.close();
+      stream.close();
+    }
+  }
+
+  /** 
+   * <p>Creates a Jar file from the directory passed.  The jar file will contain
+   * all file inside the directory, but not the directory itself.</p>
+   * 
+   * @param dir The directory to jar.
+   * @param jarFile The output jar archive file.
+   * 
+   * @throws IOException  If a problem occurs while jarring the directory.
+   */
+  public static void jar(File dir, File jarFile)
+    throws IOException {
+
+    // if the directory to jar doesn't exist or isn't a directory
+    if (dir == null || !dir.isDirectory()) {
+      throw new IllegalArgumentException("Input must be an existing directory.");
+    }
+
+    // get a listing of the files to be jared
+    File[] toBeJared = getToBeJared(dir, new ArrayList <File>());
+
+    byte buffer[] = new byte[1024];
+    FileOutputStream stream = new FileOutputStream(jarFile);
+    JarOutputStream out = new JarOutputStream(stream, new Manifest());
+
+    // loop through the files
+    for (int i = 0; i < toBeJared.length; i++) {
+
+      // for each entry create the correct jar path name
+      File current = toBeJared[i];
+      String currentPath = null;
+      while (!current.toString().equals(dir.toString())) {
+        currentPath = current.getName()
+          + (currentPath == null ? "" : File.separator + currentPath);
+        current = current.getParentFile();
+      }
+
+      // Add entry to the jar
+      JarEntry jarAdd = new JarEntry(currentPath);
+      jarAdd.setTime(toBeJared[i].lastModified());
+      out.putNextEntry(jarAdd);
+
+      // Write file to archive
+      FileInputStream in = new FileInputStream(toBeJared[i]);
+      while (true) {
+        int nRead = in.read(buffer, 0, buffer.length);
+        if (nRead <= 0)
+          break;
+        out.write(buffer, 0, nRead);
+      }
+
+      in.close();
+    }
+
+    out.close();
+    stream.close();
+  }
+
+  /** Extracts every file entry of a jar archive into the given directory. */
+  public static void unJar(File jarFile, File toDir)
+    throws IOException {
+    JarFile archive = new JarFile(jarFile);
+    try {
+      for (Enumeration contents = archive.entries();
+           contents.hasMoreElements();) {
+        JarEntry item = (JarEntry)contents.nextElement();
+        if (item.isDirectory()) {
+          continue;
+        }
+        InputStream source = archive.getInputStream(item);
+        try {
+          File target = new File(toDir, item.getName());
+          File parent = target.getParentFile();
+          // create intermediate directories; only fail if they still
+          // don't exist afterwards (mkdirs returns false when present)
+          if (!parent.mkdirs() && !parent.isDirectory()) {
+            throw new IOException("Mkdirs failed to create "
+              + parent.toString());
+          }
+          OutputStream sink = new FileOutputStream(target);
+          try {
+            byte[] chunk = new byte[8192];
+            int count;
+            while ((count = source.read(chunk)) != -1) {
+              sink.write(chunk, 0, count);
+            }
+          }
+          finally {
+            sink.close();
+          }
+        }
+        finally {
+          source.close();
+        }
+      }
+    }
+    finally {
+      archive.close();
+    }
+  }
+
+  /**
+   * <p>Returns the full path to a jar on the classpath that matches the name
+   * given.  The search starts with the classloader containing the current
+   * class and progressively visits all parent classloaders until a match is
+   * found or all loaders are exhausted.  Only the first match is returned.
+   * </p>
+   * 
+   * @param currentClass A class from the classloader to start searching.
+   * @param jarName The jar to search for.
+   * 
+   * @return String The full path to the first matching jar or null if no 
+   * matching jar is found.
+   */
+  public static String findJar(Class currentClass, String jarName) {
+
+    try {
+      // walk up the classloader hierarchy, starting at the class's own loader
+      for (ClassLoader loader = currentClass.getClassLoader();
+           loader != null; loader = loader.getParent()) {
+
+        // only URLClassLoaders expose their search path
+        if (!(loader instanceof URLClassLoader)) {
+          continue;
+        }
+
+        for (URL url : ((URLClassLoader)loader).getURLs()) {
+          String fullpath = url.toString();
+
+          // entries ending in "/" are directories, not jar files
+          if (fullpath.endsWith("/")) {
+            continue;
+          }
+
+          String fileName = fullpath.substring(fullpath.lastIndexOf("/") + 1);
+          if (jarName.equals(fileName)) {
+            String match = url.getPath();
+            if (match.startsWith("file:")) {
+              match = match.substring("file:".length());
+            }
+            match = URLDecoder.decode(match, "UTF-8");
+            return match.replaceAll("!.*$", "");
+          }
+        }
+      }
+    }
+    catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+
+    return null;
+  }
+
+  /** 
+   * Find a jar that contains a class of the same name, if any.
+   * It will return a jar file, even if that is not the first thing
+   * on the class path that has a class with the same name.
+   * 
+   * @param my_class the class to find
+   * @return a jar file that contains the class, or null if none is found;
+   * any IOException raised while scanning the classpath is rethrown
+   * wrapped in a RuntimeException
+   */
+  public static String findContainingJar(Class my_class) {
+    ClassLoader loader = my_class.getClassLoader();
+    // resource name of the class file, e.g. org/apache/Foo -> org/apache/Foo.class
+    String class_file = my_class.getName().replaceAll("\\.", "/") + ".class";
+    try {
+      // visit every classpath location that provides this class file
+      for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) {
+        URL url = (URL)itr.nextElement();
+        // jar resources look like jar:file:/path/some.jar!/org/apache/Foo.class
+        if ("jar".equals(url.getProtocol())) {
+          String toReturn = url.getPath();
+          if (toReturn.startsWith("file:")) {
+            toReturn = toReturn.substring("file:".length());
+          }
+          toReturn = URLDecoder.decode(toReturn, "UTF-8");
+          // strip the "!/org/apache/Foo.class" suffix, leaving the jar path
+          return toReturn.replaceAll("!.*$", "");
+        }
+      }
+    }
+    catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return null;
+  }
+}

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java?rev=588341&r1=588340&r2=588341&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java Thu Oct 25 13:30:55 2007
@@ -30,42 +30,12 @@
 
 /** Run a Hadoop job jar. */
 public class RunJar {
-
-  /** Unpack a jar file into a directory. */
-  public static void unJar(File jarFile, File toDir) throws IOException {
-    JarFile jar = new JarFile(jarFile);
-    try {
-      Enumeration entries = jar.entries();
-      while (entries.hasMoreElements()) {
-        JarEntry entry = (JarEntry)entries.nextElement();
-        if (!entry.isDirectory()) {
-          InputStream in = jar.getInputStream(entry);
-          try {
-            File file = new File(toDir, entry.getName());
-            if (!file.getParentFile().mkdirs()) {
-              if (!file.getParentFile().isDirectory()) {
-                throw new IOException("Mkdirs failed to create " + 
-                                      file.getParentFile().toString());
-              }
-            }
-            OutputStream out = new FileOutputStream(file);
-            try {
-              byte[] buffer = new byte[8192];
-              int i;
-              while ((i = in.read(buffer)) != -1) {
-                out.write(buffer, 0, i);
-              }
-            } finally {
-              out.close();
-            }
-          } finally {
-            in.close();
-          }
-        }
-      }
-    } finally {
-      jar.close();
-    }
+  
+  /** Unpack a jar file into a directory.
+   * @deprecated Use {@link JarUtils#unJar(File, File)} instead. */
+  @Deprecated
+  public static void unJar(File jarFile, File toDir)
+    throws IOException {
+    JarUtils.unJar(jarFile, toDir);
  }
 
   /** Run a Hadoop job jar.  If the main class is not in the jar's manifest,
@@ -129,7 +99,7 @@
         }
       });
 
-    unJar(file, workDir);
+    JarUtils.unJar(file, workDir);
     
     ArrayList<URL> classPath = new ArrayList<URL>();
     classPath.add(new File(workDir+"/").toURL());

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestJarUtils.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestJarUtils.java?rev=588341&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestJarUtils.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestJarUtils.java Thu Oct 25 13:30:55 2007
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.jar.JarEntry;
+import java.util.jar.JarInputStream;
+import java.util.jar.JarOutputStream;
+
+import junit.framework.TestCase;
+
+public class TestJarUtils
+  extends TestCase {
+
+  // root of the test resources (set via the test.build.resources property)
+  private File resourcesDir;
+  // fixture directory holding the dir1, dir2/dir2-1 and dir3 sample files
+  private File jarUtilsDir;
+  // scratch jars created by the tests and removed in tearDown
+  private File outJar1;
+  private File outJar2;
+  private String sep = File.separator;
+
+  protected void setUp()
+    throws Exception {
+    
+    resourcesDir = new File(System.getProperty("test.build.resources","."));
+    outJar1 = new File(resourcesDir, "outjar1.jar");
+    outJar2 = new File(resourcesDir, "outjar2.jar");
+    jarUtilsDir = new File(resourcesDir, "jarutils");
+  }
+
+  protected void tearDown()
+    throws Exception {
+    outJar1.delete();
+    outJar2.delete();
+  }
+
+  /** Returns the set of entry names contained in the given jar file. */
+  private Set<String> readEntryNames(File jar)
+    throws Exception {
+
+    FileInputStream fis = new FileInputStream(jar);
+    JarInputStream jis = new JarInputStream(fis);
+    Set<String> names = new HashSet<String>();
+    try {
+      JarEntry entry = null;
+      while ((entry = jis.getNextJarEntry()) != null) {
+        names.add(entry.getName());
+      }
+    }
+    finally {
+      jis.close();
+      fis.close();
+    }
+    return names;
+  }
+
+  /** Tests that getJarPath builds entry names relative to the base dir. */
+  public void testGetJarPath() {
+
+    File dir1 = new File(jarUtilsDir, "dir1");
+    File file1 = new File(dir1, "file1");
+    assertEquals("file1", JarUtils.getJarPath(dir1, file1));
+
+    File dir2 = new File(jarUtilsDir, "dir2");
+    String file21Path = "dir2-1" + sep + "file2-1";
+    File file21 = new File(dir2, file21Path);
+    assertEquals(file21Path, JarUtils.getJarPath(dir2, file21));
+  }
+
+  /** Tests that jar() archives every file below the directory. */
+  public void testJar()
+    throws Exception {
+
+    JarUtils.jar(jarUtilsDir, outJar1);
+    Set<String> resources = readEntryNames(outJar1);
+
+    assertTrue(resources.contains("dir1" + sep + "file1"));
+    assertTrue(resources.contains("dir2" + sep + "file2"));
+    assertTrue(resources.contains("dir2" + sep + "dir2-1" + sep + "file2-1"));
+    assertTrue(resources.contains("dir3" + sep + "file3"));
+    assertEquals(4, resources.size());
+  }
+
+  /** Tests jar/zip detection against a real jar and a plain file. */
+  public void testIsJarOrZip()
+    throws Exception {
+
+    File file1 = new File(jarUtilsDir, "dir1" + sep + "file1");
+    JarUtils.jar(jarUtilsDir, outJar1);
+    assertTrue(JarUtils.isJarOrZipFile(outJar1));
+    assertFalse(JarUtils.isJarOrZipFile(file1));
+  }
+
+  /** Tests jarAll() both with and without uncompressing nested jars. */
+  public void testJarAll()
+    throws Exception {
+
+    JarUtils.jar(jarUtilsDir, outJar1);
+    File file3 = new File(jarUtilsDir, "dir3" + sep + "file3");
+    File dir1 = new File(jarUtilsDir, "dir1");
+    File[] resources = new File[3];
+    resources[0] = dir1;
+    resources[1] = outJar1;
+    resources[2] = file3;
+
+    // uncompress == true: the nested jar's entries are merged in
+    JarUtils.jarAll(resources, outJar2, true);
+    Set<String> allres = readEntryNames(outJar2);
+
+    assertTrue(allres.contains("dir1" + sep + "file1"));
+    assertTrue(allres.contains("dir2" + sep + "file2"));
+    assertTrue(allres.contains("dir2" + sep + "dir2-1" + sep + "file2-1"));
+    assertTrue(allres.contains("dir3" + sep + "file3"));
+    assertTrue(allres.contains("file3"));
+    assertTrue(allres.contains("file1"));
+    assertEquals(6, allres.size());
+
+    // uncompress == false: the nested jar is copied in as a single entry.
+    // NOTE: the original test assigned this result to the wrong variable
+    // ('good' instead of 'good2'), so the check always passed; fixed here.
+    JarUtils.jarAll(resources, outJar2, false);
+    Set<String> allres2 = readEntryNames(outJar2);
+
+    assertTrue(allres2.contains("outjar1.jar"));
+    assertTrue(allres2.contains("file3"));
+    assertTrue(allres2.contains("file1"));
+    assertEquals(3, allres2.size());
+  }
+
+  /** Tests that copyJarContents transfers every entry to the output jar. */
+  public void testCopyJarContents()
+    throws Exception {
+
+    JarUtils.jar(jarUtilsDir, outJar1);
+
+    FileOutputStream fos = new FileOutputStream(outJar2);
+    JarOutputStream jos = new JarOutputStream(fos);
+
+    JarUtils.copyJarContents(outJar1, jos);
+
+    // close once; the original closed these two streams twice
+    jos.close();
+    fos.close();
+
+    Set<String> allres = readEntryNames(outJar2);
+
+    assertTrue(allres.contains("dir1" + sep + "file1"));
+    assertTrue(allres.contains("dir2" + sep + "file2"));
+    assertTrue(allres.contains("dir2" + sep + "dir2-1" + sep + "file2-1"));
+    assertTrue(allres.contains("dir3" + sep + "file3"));
+    assertEquals(4, allres.size());
+  }
+}

Added: lucene/hadoop/trunk/src/test/resources/jarutils/dir1/file1
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/resources/jarutils/dir1/file1?rev=588341&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/resources/jarutils/dir1/file1 (added)
+++ lucene/hadoop/trunk/src/test/resources/jarutils/dir1/file1 Thu Oct 25 13:30:55 2007
@@ -0,0 +1 @@
+file1
\ No newline at end of file

Added: lucene/hadoop/trunk/src/test/resources/jarutils/dir2/dir2-1/file2-1
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/resources/jarutils/dir2/dir2-1/file2-1?rev=588341&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/resources/jarutils/dir2/dir2-1/file2-1 (added)
+++ lucene/hadoop/trunk/src/test/resources/jarutils/dir2/dir2-1/file2-1 Thu Oct 25 13:30:55 2007
@@ -0,0 +1 @@
+file2-1
\ No newline at end of file

Added: lucene/hadoop/trunk/src/test/resources/jarutils/dir2/file2
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/resources/jarutils/dir2/file2?rev=588341&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/resources/jarutils/dir2/file2 (added)
+++ lucene/hadoop/trunk/src/test/resources/jarutils/dir2/file2 Thu Oct 25 13:30:55 2007
@@ -0,0 +1 @@
+file2
\ No newline at end of file

Added: lucene/hadoop/trunk/src/test/resources/jarutils/dir3/file3
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/resources/jarutils/dir3/file3?rev=588341&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/resources/jarutils/dir3/file3 (added)
+++ lucene/hadoop/trunk/src/test/resources/jarutils/dir3/file3 Thu Oct 25 13:30:55 2007
@@ -0,0 +1 @@
+file3
\ No newline at end of file



Mime
View raw message