incubator-blur-commits mailing list archives

From amccu...@apache.org
Subject [1/2] git commit: Hadoop2 unit tests pass.
Date Fri, 02 May 2014 15:30:29 GMT
Repository: incubator-blur
Updated Branches:
  refs/heads/apache-blur-0.2 34a39f83b -> 5d18449fc


Hadoop2 unit tests pass.


Project: http://git-wip-us.apache.org/repos/asf/incubator-blur/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-blur/commit/837f7506
Tree: http://git-wip-us.apache.org/repos/asf/incubator-blur/tree/837f7506
Diff: http://git-wip-us.apache.org/repos/asf/incubator-blur/diff/837f7506

Branch: refs/heads/apache-blur-0.2
Commit: 837f75068d50db5d8c253ce83194653a4a782de3
Parents: 7afca43
Author: Aaron McCurry <amccurry@gmail.com>
Authored: Fri May 2 11:29:39 2014 -0400
Committer: Aaron McCurry <amccurry@gmail.com>
Committed: Fri May 2 11:29:39 2014 -0400

----------------------------------------------------------------------
 blur-mapred-hadoop2/pom.xml                     |   2 +-
 .../blur/mapreduce/lib/BlurOutputCommitter.java |  75 +++++-----
 .../blur/mapreduce/lib/BlurOutputFormat.java    |   5 +-
 .../blur/mapreduce/lib/CsvBlurDriver.java       | 142 +++++++++++--------
 .../lib/BlurOutputFormatMiniClusterTest.java    |  27 ++--
 .../mapreduce/lib/BlurOutputFormatTest.java     |   2 +-
 .../org/apache/blur/mapreduce/lib/Test.java     |  38 -----
 7 files changed, 138 insertions(+), 153 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/837f7506/blur-mapred-hadoop2/pom.xml
----------------------------------------------------------------------
diff --git a/blur-mapred-hadoop2/pom.xml b/blur-mapred-hadoop2/pom.xml
index 4127e4e..3c6f07e 100644
--- a/blur-mapred-hadoop2/pom.xml
+++ b/blur-mapred-hadoop2/pom.xml
@@ -156,7 +156,7 @@ under the License.
 				      <groupId>org.apache.mrunit</groupId>
 				      <artifactId>mrunit</artifactId>
 				      <version>${mrunit.version}</version>
-				      <classifier>hadoop1</classifier>
+				      <classifier>hadoop2</classifier>
 					  <scope>test</scope>
                 </dependency>
 				<dependency>

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/837f7506/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputCommitter.java
----------------------------------------------------------------------
diff --git a/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputCommitter.java b/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputCommitter.java
index d906974..ab866fd 100644
--- a/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputCommitter.java
+++ b/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputCommitter.java
@@ -17,7 +17,6 @@ package org.apache.blur.mapreduce.lib;
  * limitations under the License.
  */
 import java.io.IOException;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.blur.log.Log;
 import org.apache.blur.log.LogFactory;
@@ -30,31 +29,16 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.TaskType;
 
 public class BlurOutputCommitter extends OutputCommitter {
 
   private static final Log LOG = LogFactory.getLog(BlurOutputCommitter.class);
 
-  private Path _newIndex;
-  private Configuration _configuration;
-  private TaskAttemptID _taskAttemptID;
-  private Path _indexPath;
-  private final boolean _runTaskCommit;
-  private TableDescriptor _tableDescriptor;
-
-  public BlurOutputCommitter(TaskType taskType, int numReduceTasks) {
-    if (taskType == TaskType.MAP && numReduceTasks != 0) {
-      _runTaskCommit = false;
-    } else {
-      _runTaskCommit = true;
-    }
-  }
-
   @Override
   public void setupJob(JobContext jobContext) throws IOException {
     LOG.info("Running setup job.");
@@ -173,7 +157,13 @@ public class BlurOutputCommitter extends OutputCommitter {
 
   @Override
   public boolean needsTaskCommit(TaskAttemptContext context) throws IOException {
-    return _runTaskCommit;
+    TaskAttemptID taskAttemptID = context.getTaskAttemptID();
+    TaskType taskType = taskAttemptID.getTaskType();
+    if (taskType == TaskType.MAP && context.getNumReduceTasks() != 0) {
+      return false;
+    } else {
+      return true;
+    }
   }
 
   @Override
@@ -181,40 +171,51 @@ public class BlurOutputCommitter extends OutputCommitter {
     LOG.info("Running task setup.");
   }
 
+  private static class Conf {
+    Path _newIndex;
+    Configuration _configuration;
+    TaskAttemptID _taskAttemptID;
+    Path _indexPath;
+    TableDescriptor _tableDescriptor;
+  }
+
   @Override
   public void commitTask(TaskAttemptContext context) throws IOException {
     LOG.info("Running commit task.");
-    setup(context);
-    FileSystem fileSystem = _newIndex.getFileSystem(_configuration);
-    if (fileSystem.exists(_newIndex) && !fileSystem.isFile(_newIndex)) {
-      Path dst = new Path(_indexPath, _taskAttemptID.toString() + ".task_complete");
-      LOG.info("Committing [{0}] to [{1}]", _newIndex, dst);
-      fileSystem.rename(_newIndex, dst);
+    Conf conf = setup(context);
+    FileSystem fileSystem = conf._newIndex.getFileSystem(conf._configuration);
+    if (fileSystem.exists(conf._newIndex) && !fileSystem.isFile(conf._newIndex)) {
+      Path dst = new Path(conf._indexPath, conf._taskAttemptID.toString() + ".task_complete");
+      LOG.info("Committing [{0}] to [{1}]", conf._newIndex, dst);
+      fileSystem.rename(conf._newIndex, dst);
     } else {
-      throw new IOException("Path [" + _newIndex + "] does not exist, can not commit.");
+      throw new IOException("Path [" + conf._newIndex + "] does not exist, can not commit.");
     }
   }
 
   @Override
   public void abortTask(TaskAttemptContext context) throws IOException {
     LOG.info("Running abort task.");
-    setup(context);
-    FileSystem fileSystem = _newIndex.getFileSystem(_configuration);
-    LOG.info("abortTask - Deleting [{0}]", _newIndex);
-    fileSystem.delete(_newIndex, true);
+    Conf conf = setup(context);
+    FileSystem fileSystem = conf._newIndex.getFileSystem(conf._configuration);
+    LOG.info("abortTask - Deleting [{0}]", conf._newIndex);
+    fileSystem.delete(conf._newIndex, true);
   }
 
-  private void setup(TaskAttemptContext context) throws IOException {
-    _configuration = context.getConfiguration();
-    _tableDescriptor = BlurOutputFormat.getTableDescriptor(_configuration);
-    int shardCount = _tableDescriptor.getShardCount();
+  private Conf setup(TaskAttemptContext context) throws IOException {
+    LOG.info("Setting up committer with task attempt [{0}]", context.getTaskAttemptID().toString());
+    Conf conf = new Conf();
+    conf._configuration = context.getConfiguration();
+    conf._tableDescriptor = BlurOutputFormat.getTableDescriptor(conf._configuration);
+    int shardCount = conf._tableDescriptor.getShardCount();
     int attemptId = context.getTaskAttemptID().getTaskID().getId();
     int shardId = attemptId % shardCount;
-    _taskAttemptID = context.getTaskAttemptID();
-    Path tableOutput = BlurOutputFormat.getOutputPath(_configuration);
+    conf._taskAttemptID = context.getTaskAttemptID();
+    Path tableOutput = BlurOutputFormat.getOutputPath(conf._configuration);
     String shardName = BlurUtil.getShardName(BlurConstants.SHARD_PREFIX, shardId);
-    _indexPath = new Path(tableOutput, shardName);
-    _newIndex = new Path(_indexPath, _taskAttemptID.toString() + ".tmp");
+    conf._indexPath = new Path(tableOutput, shardName);
+    conf._newIndex = new Path(conf._indexPath, conf._taskAttemptID.toString() + ".tmp");
+    return conf;
   }
 
 }

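With this change BlurOutputCommitter is stateless: the constructor arguments and instance fields are gone, and each callback rebuilds what it needs from the TaskAttemptContext through the small per-call Conf holder. For readers following the path arithmetic in setup(), here is a minimal standalone sketch (not part of the commit); the zero-padded "shard-" name and the attempt string are illustrative assumptions:

    import org.apache.hadoop.fs.Path;

    public class CommitterPathSketch {
      public static void main(String[] args) {
        int shardCount = 4;                 // from the TableDescriptor
        int taskId = 7;                     // context.getTaskAttemptID().getTaskID().getId()
        int shardId = taskId % shardCount;  // 7 % 4 == 3: task ids wrap onto shards
        Path tableOutput = new Path("hdfs://namenode/blur/table1");            // illustrative
        Path indexPath = new Path(tableOutput, String.format("shard-%08d", shardId)); // assumed name format
        // Each attempt writes into "<shard>/<attempt>.tmp"; commitTask renames it to
        // "<attempt>.task_complete" and abortTask deletes it, as in the diff above.
        Path newIndex = new Path(indexPath, "attempt_201405021129_0001_r_000007_0.tmp");
        System.out.println(newIndex);
      }
    }
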
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/837f7506/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputFormat.java
----------------------------------------------------------------------
diff --git a/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputFormat.java b/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputFormat.java
index 20d2e33..c959997 100644
--- a/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputFormat.java
+++ b/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/BlurOutputFormat.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -122,9 +121,7 @@ public class BlurOutputFormat extends OutputFormat<Text, BlurMutate> {
 
   @Override
   public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException {
-    TaskAttemptID taskAttemptID = context.getTaskAttemptID();
-    TaskType taskType = taskAttemptID.getTaskType();
-    return new BlurOutputCommitter(taskType, context.getNumReduceTasks());
+    return new BlurOutputCommitter();
   }
 
   public static TableDescriptor getTableDescriptor(Configuration configuration) throws IOException {

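Since the committer now has a usable no-arg constructor, getOutputCommitter() no longer needs the task type or reduce count up front. A hedged sketch of how a Hadoop 2 driver might wire BlurOutputFormat together, using the Job.getInstance(...) idiom this commit adopts in the mini-cluster test below; the input path is illustrative and BlurOutputFormat.setupJob(...) is assumed to populate the job as it does in the tests:

    import org.apache.blur.mapreduce.lib.BlurOutputFormat;
    import org.apache.blur.mapreduce.lib.CsvBlurMapper;
    import org.apache.blur.thrift.generated.TableDescriptor;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

    public class BlurIndexDriverSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "blur index");    // Hadoop 2 idiom; see the test diff below
        job.setJarByClass(BlurIndexDriverSketch.class);
        job.setMapperClass(CsvBlurMapper.class);
        job.setInputFormatClass(TextInputFormat.class);
        FileInputFormat.addInputPath(job, new Path("hdfs://namenode/input/in1")); // illustrative
        CsvBlurMapper.addColumns(job, "cf1", "col");      // as in the mini-cluster test
        TableDescriptor tableDescriptor = new TableDescriptor(); // would be fully populated in practice
        BlurOutputFormat.setupJob(job, tableDescriptor);  // assumed helper, as used by the tests
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }
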
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/837f7506/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/CsvBlurDriver.java
----------------------------------------------------------------------
diff --git a/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/CsvBlurDriver.java b/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/CsvBlurDriver.java
index c1fa7b9..76ae35f 100644
--- a/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/CsvBlurDriver.java
+++ b/blur-mapred-hadoop2/src/main/java/org/apache/blur/mapreduce/lib/CsvBlurDriver.java
@@ -21,6 +21,8 @@ import java.io.PrintWriter;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.blur.log.Log;
+import org.apache.blur.log.LogFactory;
 import org.apache.blur.thrift.BlurClient;
 import org.apache.blur.thrift.generated.Blur.Iface;
 import org.apache.blur.thrift.generated.TableDescriptor;
@@ -62,17 +64,20 @@ import com.google.common.base.Splitter;
 @SuppressWarnings("static-access")
 public class CsvBlurDriver {
 
+  private static final Log LOG = LogFactory.getLog(CsvBlurDriver.class);
+
   public static final String CSVLOADER = "csvloader";
   public static final String MAPRED_COMPRESS_MAP_OUTPUT = "mapred.compress.map.output";
   public static final String MAPRED_MAP_OUTPUT_COMPRESSION_CODEC = "mapred.map.output.compression.codec";
   public static final int DEFAULT_WIDTH = 100;
-  public static final String HEADER = "The \"" +CSVLOADER +
-  		"\" command is used to load delimited into a Blur table.\nThe required options are \"-c\", \"-t\", \"-d\". The " +
-  		"standard format for the contents of a file is:\"rowid,recordid,family,col1,col2,...\". However there are " +
-  		"several options, such as the rowid and recordid can be generated based on the data in the record via the " +
-  		"\"-A\" and \"-a\" options. The family can assigned based on the path via the \"-I\" option. The column " +
-  		"name order can be mapped via the \"-d\" option. Also you can set the input " +
-  		"format to either sequence files vie the \"-S\" option or leave the default text files.";
+  public static final String HEADER = "The \""
+      + CSVLOADER
+      + "\" command is used to load delimited into a Blur table.\nThe required options are \"-c\", \"-t\", \"-d\". The "
+      + "standard format for the contents of a file is:\"rowid,recordid,family,col1,col2,...\". However there are "
+      + "several options, such as the rowid and recordid can be generated based on the data in the record via the "
+      + "\"-A\" and \"-a\" options. The family can assigned based on the path via the \"-I\" option. The column "
+      + "name order can be mapped via the \"-d\" option. Also you can set the input "
+      + "format to either sequence files vie the \"-S\" option or leave the default text files.";
 
   enum COMPRESSION {
     SNAPPY(SnappyCodec.class), GZIP(GzipCodec.class), BZIP(BZip2Codec.class), DEFAULT(DefaultCodec.class);
@@ -180,10 +185,10 @@ public class CsvBlurDriver {
     }
     // processing the 'I' option
     if (cmd.hasOption("I")) {
-    	if(cmd.hasOption("C")){
-    		 System.err.println("'I' and 'C' both parameters can not be used together.");
-             return null;
-    	}
+      if (cmd.hasOption("C")) {
+        System.err.println("'I' and 'C' both parameters can not be used together.");
+        return null;
+      }
       Option[] options = cmd.getOptions();
       for (Option option : options) {
         if (option.getOpt().equals("I")) {
@@ -245,6 +250,10 @@ public class CsvBlurDriver {
       throws IOException {
     Set<Path> pathSet = new HashSet<Path>();
     FileSystem fileSystem = path.getFileSystem(configuration);
+    if (!fileSystem.exists(path)) {
+      LOG.warn("Path not found [{0}]", path);
+      return pathSet;
+    }
     FileStatus[] listStatus = fileSystem.listStatus(path);
     for (FileStatus status : listStatus) {
       if (status.isDirectory()) {
@@ -272,10 +281,20 @@ public class CsvBlurDriver {
             "The file delimiter to be used. (default value ',')  NOTE: For special "
                 + "charactors like the default hadoop separator of ASCII value 1, you can
use standard "
                 + "java escaping (\\u0001)").create("s"));
-    options.addOption(OptionBuilder.withArgName("path*").hasArg()
-        .withDescription("The directory to index, the family name is assumed to BE present
in the file contents. (hdfs://namenode/input/in1)").create("i"));
-    options.addOption(OptionBuilder.withArgName("family path*").hasArgs()
-        .withDescription("The directory to index with a family name, the family name is assumed
to NOT be present in the file contents. (family hdfs://namenode/input/in1)").create("I"));
+    options
+        .addOption(OptionBuilder
+            .withArgName("path*")
+            .hasArg()
+            .withDescription(
+                "The directory to index, the family name is assumed to BE present in the
file contents. (hdfs://namenode/input/in1)")
+            .create("i"));
+    options
+        .addOption(OptionBuilder
+            .withArgName("family path*")
+            .hasArgs()
+            .withDescription(
+                "The directory to index with a family name, the family name is assumed to
NOT be present in the file contents. (family hdfs://namenode/input/in1)")
+            .create("I"));
     options
         .addOption(OptionBuilder
             .withArgName("auto generate record ids")
@@ -352,58 +371,57 @@ public class CsvBlurDriver {
 
   public static class CsvBlurCombineSequenceFileInputFormat extends CombineFileInputFormat<Writable, Text> {
 
-    
-    private static class SequenceFileRecordReaderWrapper extends RecordReader<Writable, Text>{
-    	
-    	private final RecordReader<Writable,Text> delegate;
-    	private final FileSplit fileSplit;
-
-		@SuppressWarnings("unused")
-		public SequenceFileRecordReaderWrapper(CombineFileSplit split,
-            TaskAttemptContext context, Integer index) throws IOException{
-            fileSplit = new FileSplit(split.getPath(index),
-                      split.getOffset(index), split.getLength(index),
-                      split.getLocations());
-            delegate = new SequenceFileInputFormat<Writable,Text>().createRecordReader(fileSplit, context);
-        }
+    private static class SequenceFileRecordReaderWrapper extends RecordReader<Writable, Text> {
 
-        @Override public float getProgress() throws IOException, InterruptedException {
-            return delegate.getProgress();
-        }
+      private final RecordReader<Writable, Text> delegate;
+      private final FileSplit fileSplit;
+
+      @SuppressWarnings("unused")
+      public SequenceFileRecordReaderWrapper(CombineFileSplit split, TaskAttemptContext context, Integer index)
+          throws IOException {
+        fileSplit = new FileSplit(split.getPath(index), split.getOffset(index), split.getLength(index),
+            split.getLocations());
+        delegate = new SequenceFileInputFormat<Writable, Text>().createRecordReader(fileSplit, context);
+      }
 
-		@Override
-		public Writable getCurrentKey() throws IOException,
-				InterruptedException {
-			return delegate.getCurrentKey();
-		}
-
-		@Override
-		public Text getCurrentValue() throws IOException, InterruptedException {
-			return delegate.getCurrentValue();
-		}
-
-		@Override
-		public void initialize(InputSplit arg0, TaskAttemptContext context)
-				throws IOException, InterruptedException {
-			delegate.initialize(fileSplit, context);
-		}
-
-		@Override
-		public boolean nextKeyValue() throws IOException, InterruptedException {
-			return delegate.nextKeyValue();
-		}
-		
-		@Override public void close() throws IOException {
-            delegate.close();
-		}
+      @Override
+      public float getProgress() throws IOException, InterruptedException {
+        return delegate.getProgress();
+      }
+
+      @Override
+      public Writable getCurrentKey() throws IOException, InterruptedException {
+        return delegate.getCurrentKey();
+      }
+
+      @Override
+      public Text getCurrentValue() throws IOException, InterruptedException {
+        return delegate.getCurrentValue();
+      }
+
+      @Override
+      public void initialize(InputSplit arg0, TaskAttemptContext context) throws IOException, InterruptedException {
+        delegate.initialize(fileSplit, context);
+      }
+
+      @Override
+      public boolean nextKeyValue() throws IOException, InterruptedException {
+        return delegate.nextKeyValue();
+      }
+
+      @Override
+      public void close() throws IOException {
+        delegate.close();
+      }
 
     }
-    	
+
     @Override
-	public RecordReader<Writable, Text> createRecordReader(
-			InputSplit split, TaskAttemptContext context) throws IOException {
-		return new CombineFileRecordReader<Writable, Text>((CombineFileSplit) split, context, SequenceFileRecordReaderWrapper.class);
-	}
+    public RecordReader<Writable, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
+        throws IOException {
+      return new CombineFileRecordReader<Writable, Text>((CombineFileSplit) split, context,
+          SequenceFileRecordReaderWrapper.class);
+    }
   }
 
 }

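Besides the formatting cleanup, CsvBlurDriver gains one defensive change: the recursive input-path collection now probes fileSystem.exists(path) before calling listStatus(...), warning and returning an empty set for a missing directory instead of throwing. A standalone sketch of that guard, assuming the usual recurse-into-directories shape of the surrounding method (only the exists() check is verbatim from the diff):

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class PathGuardSketch {
      public static Set<Path> pathsUnder(Configuration configuration, Path path) throws IOException {
        Set<Path> pathSet = new HashSet<Path>();
        FileSystem fileSystem = path.getFileSystem(configuration);
        if (!fileSystem.exists(path)) {
          // The diff logs this via LOG.warn("Path not found [{0}]", path).
          System.err.println("Path not found [" + path + "]");
          return pathSet;
        }
        for (FileStatus status : fileSystem.listStatus(path)) {
          if (status.isDirectory()) {
            pathSet.addAll(pathsUnder(configuration, status.getPath())); // assumed recursion shape
          } else {
            pathSet.add(status.getPath());
          }
        }
        return pathSet;
      }
    }
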
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/837f7506/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatMiniClusterTest.java
----------------------------------------------------------------------
diff --git a/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatMiniClusterTest.java b/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatMiniClusterTest.java
index c14e86e..a7ee0f0 100644
--- a/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatMiniClusterTest.java
+++ b/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatMiniClusterTest.java
@@ -16,7 +16,9 @@ package org.apache.blur.mapreduce.lib;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.BufferedReader;
 import java.io.DataInputStream;
@@ -44,8 +46,8 @@ import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
+import org.apache.hadoop.mapred.MiniMRYarnClusterAdapter;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
@@ -59,9 +61,8 @@ public class BlurOutputFormatMiniClusterTest {
 
   private static Configuration conf = new Configuration();
   private static FileSystem fileSystem;
-  private static MiniMRCluster mr;
+  private static MiniMRYarnClusterAdapter mr;
   private static Path TEST_ROOT_DIR;
-  private static JobConf jobConf;
   private static MiniCluster miniCluster;
   private Path inDir = new Path(TEST_ROOT_DIR + "/in");
   private static final File TMPDIR = new File(System.getProperty("blur.tmp.dir",
@@ -70,6 +71,7 @@ public class BlurOutputFormatMiniClusterTest {
   @BeforeClass
   public static void setupTest() throws Exception {
     GCWatcher.init(0.60);
+    BlurOutputFormatTest.setupJavaHome();
     LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
     File testDirectory = new File(TMPDIR, "blur-cluster-test").getAbsoluteFile();
     testDirectory.mkdirs();
@@ -101,15 +103,19 @@ public class BlurOutputFormatMiniClusterTest {
     } catch (IOException io) {
       throw new RuntimeException("problem getting local fs", io);
     }
-    mr = new MiniMRCluster(1, miniCluster.getFileSystemUri().toString(), 1);
-    jobConf = mr.createJobConf();
+
+    FileSystem.setDefaultUri(conf, miniCluster.getFileSystemUri());
+    mr = (MiniMRYarnClusterAdapter) MiniMRClientClusterFactory.create(BlurOutputFormatTest.class, 1, conf);
+    mr.start();
+    conf = mr.getConfig();
+    
     BufferStore.initNewBuffer(128, 128 * 128);
   }
 
   @AfterClass
   public static void teardown() {
     if (mr != null) {
-      mr.shutdown();
+      mr.stop();
     }
     miniCluster.shutdownBlurCluster();
     rm(new File("build"));
@@ -140,13 +146,14 @@ public class BlurOutputFormatMiniClusterTest {
     writeRecordsFile("in/part1", 1, 1, 1, 1, "cf1");
     writeRecordsFile("in/part2", 1, 1, 2, 1, "cf1");
 
-    Job job = new Job(jobConf, "blur index");
+    Job job = Job.getInstance(conf, "blur index");
     job.setJarByClass(BlurOutputFormatMiniClusterTest.class);
     job.setMapperClass(CsvBlurMapper.class);
     job.setInputFormatClass(TextInputFormat.class);
 
     FileInputFormat.addInputPath(job, new Path(TEST_ROOT_DIR + "/in"));
-    String tableUri = new Path(TEST_ROOT_DIR + "/blur/" + tableName).toString();
+    String tableUri = new Path(TEST_ROOT_DIR + "/blur/" + tableName).makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory())
+        .toString();
     CsvBlurMapper.addColumns(job, "cf1", "col");
 
     TableDescriptor tableDescriptor = new TableDescriptor();

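The test migration follows the standard Hadoop 1 to Hadoop 2 recipe: MiniMRCluster plus createJobConf() is replaced by MiniMRClientClusterFactory plus getConfig(), and new Job(jobConf, ...) by Job.getInstance(conf, ...). A compact sketch of the idiom, written against the public MiniMRClientCluster interface (the diff above stores the concrete MiniMRYarnClusterAdapter):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.MiniMRClientCluster;
    import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
    import org.apache.hadoop.mapreduce.Job;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hadoop 2 replacement for: mr = new MiniMRCluster(1, fsUri, 1)
        MiniMRClientCluster mr = MiniMRClientClusterFactory.create(MiniClusterSketch.class, 1, conf);
        mr.start();
        Configuration jobConf = mr.getConfig();            // replaces mr.createJobConf()
        Job job = Job.getInstance(jobConf, "blur index");  // replaces new Job(jobConf, ...)
        // ... configure input/output and run the job here ...
        mr.stop();                                         // replaces mr.shutdown()
      }
    }
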
http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/837f7506/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatTest.java
----------------------------------------------------------------------
diff --git a/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatTest.java b/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatTest.java
index d3db116..2cdeeb8 100644
--- a/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatTest.java
+++ b/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/BlurOutputFormatTest.java
@@ -82,7 +82,7 @@ public class BlurOutputFormatTest {
     BufferStore.initNewBuffer(128, 128 * 128);
   }
 
-  private static void setupJavaHome() {
+  public static void setupJavaHome() {
     String str = System.getenv("JAVA_HOME");
     if (str == null) {
       throw new RuntimeException("JAVA_HOME not set.");

http://git-wip-us.apache.org/repos/asf/incubator-blur/blob/837f7506/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/Test.java
----------------------------------------------------------------------
diff --git a/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/Test.java b/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/Test.java
deleted file mode 100644
index 5999f80..0000000
--- a/blur-mapred-hadoop2/src/test/java/org/apache/blur/mapreduce/lib/Test.java
+++ /dev/null
@@ -1,38 +0,0 @@
-package org.apache.blur.mapreduce.lib;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.mapreduce.MiniHadoopClusterManager;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Test {
-
-  public static void main(String[] args) throws FileNotFoundException, IOException, URISyntaxException {
-    MiniHadoopClusterManager manager = new MiniHadoopClusterManager();
-    String[] sargs = new String[]{"-D" + MiniDFSCluster.HDFS_MINIDFS_BASEDIR + "=./dfs-mini-tmp"};
-    manager.run(sargs);
-    manager.start();
-  
-
-  }
-
-}

