hadoop-mapreduce-issues mailing list archives

From "Jason Lowe (JIRA)" <j...@apache.org>
Subject [jira] [Commented] (MAPREDUCE-5668) Exception in thread "main" java.lang.IncompatibleClassChangeError: Found class org.apache.hadoop.mapreduce.JobContext, but interface was expected
Date Wed, 04 Dec 2013 14:18:36 GMT

    [ https://issues.apache.org/jira/browse/MAPREDUCE-5668?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13838917#comment-13838917 ]

Jason Lowe commented on MAPREDUCE-5668:
---------------------------------------

This is something more appropriately asked on the [user@ mailing list|http://hadoop.apache.org/mailing_lists.html#User].
JIRAs are for reporting bugs against Hadoop, and this does not appear to be a bug. It looks
like the code was compiled against a 2.x release but then run against a 1.x release, since
org.apache.hadoop.mapreduce.JobContext changed from a class to an interface between the
1.x and 2.x releases.
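
As a quick way to check which flavor of JobContext a given classpath provides, a small
reflection probe along these lines works against either release line. This is just a
minimal sketch; the class name CheckJobContext is made up for illustration:

    public class CheckJobContext {
        public static void main(String[] args) throws Exception {
            // Load JobContext from whatever Hadoop jars are on the runtime classpath;
            // Class.forName avoids any compile-time dependency on a particular release.
            Class<?> c = Class.forName("org.apache.hadoop.mapreduce.JobContext");
            // Prints "interface" on a 2.x classpath and "class" on a 1.x classpath.
            System.out.println(c.isInterface() ? "interface" : "class");
        }
    }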

The org.apache.hadoop.mapreduce.* API is only guaranteed to be source-compatible, not
binary-compatible, between the 1.x and 2.x releases. See the [binary compatibility document|http://hadoop.apache.org/docs/r2.2.0/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html]
for more details. Also, we generally cannot support compiling against a newer release and
then running on an older one, because newly added APIs would not exist in the older release.
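
For what it's worth, a program can also print the Hadoop version it actually finds at
run time via org.apache.hadoop.util.VersionInfo, which makes this kind of
compile-versus-run mismatch easy to spot. A minimal sketch, with the class name
PrintHadoopVersion made up for illustration:

    import org.apache.hadoop.util.VersionInfo;

    public class PrintHadoopVersion {
        public static void main(String[] args) {
            // Reports the Hadoop build found on the runtime classpath,
            // e.g. "1.2.1" versus "2.2.0".
            System.out.println("Hadoop version: " + VersionInfo.getVersion());
        }
    }

Running that with the cluster's hadoop command shows which release jobs would really
execute against.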

> Exception in thread "main" java.lang.IncompatibleClassChangeError: Found class org.apache.hadoop.mapreduce.JobContext, but interface was expected
> -------------------------------------------------------------------------------------------------------------------------------------------------
>
>                 Key: MAPREDUCE-5668
>                 URL: https://issues.apache.org/jira/browse/MAPREDUCE-5668
>             Project: Hadoop Map/Reduce
>          Issue Type: Bug
>            Reporter: ranjini
>
> Hi, please help. I wrote this code, and at runtime I got this issue:
> Exception in thread "main" java.lang.IncompatibleClassChangeError: Found class org.apache.hadoop.mapreduce.JobContext, but interface was expected
> 	at org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.getSplits(CombineFileInputFormat.java:170)
> 	at org.apache.hadoop.mapred.JobClient.writeNewSplits(JobClient.java:885)
> 	at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:779)
> 	at org.apache.hadoop.mapreduce.Job.submit(Job.java:432)
> 	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:447)
> 	at MultiFileWordCount.run(MultiFileWordCount.java:395)
> 	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
> 	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
> 	at MultiFileWordCount.main(MultiFileWordCount.java:401)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
> 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
> 	at java.lang.reflect.Method.invoke(Method.java:597)
> 	at org.apache.hadoop.util.RunJar.main(RunJar.java:156)
> hduser@localhost:~$ 
> I have attached the code.
> import java.io.DataInput;  
> import java.io.DataOutput;  
> import java.io.IOException;  
> import java.util.StringTokenizer;  
> import org.apache.hadoop.conf.Configured;  
> import org.apache.hadoop.fs.FSDataInputStream;  
> import org.apache.hadoop.fs.FileSystem;  
> import org.apache.hadoop.fs.Path;  
> import org.apache.hadoop.io.IntWritable;  
> import org.apache.hadoop.io.Text;  
> import org.apache.hadoop.io.WritableComparable;  
> import org.apache.hadoop.mapreduce.InputSplit;  
> import org.apache.hadoop.mapreduce.Job;  
> import org.apache.hadoop.mapreduce.Mapper;  
> import org.apache.hadoop.mapreduce.RecordReader;  
> import org.apache.hadoop.mapreduce.TaskAttemptContext;  
> import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;  
> import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;  
> import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;  
> import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;  
> import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;  
> import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;  
> import org.apache.hadoop.util.LineReader;  
> import org.apache.hadoop.util.Tool;  
> import org.apache.hadoop.util.ToolRunner;  
>  /**  
>   * MultiFileWordCount is an example to demonstrate the usage of   
>   * MultiFileInputFormat. This example counts the occurrences of
>   * words in the text files under the given input directory.  
>   */ 
> public class MultiFileWordCount extends Configured implements Tool {  
>    /**  
>     * This record keeps <filename,offset> pairs.  
>     */ 
> public static class WordOffset implements WritableComparable {  
>    private long offset;  
>    private String fileName;  
>   
>    public void readFields(DataInput in) throws IOException {  
>       this.offset = in.readLong();  
>       this.fileName = Text.readString(in);  
>      }  
>      public void write(DataOutput out) throws IOException {  
>        out.writeLong(offset);  
>        Text.writeString(out, fileName);  
>      }  
>      public int compareTo(Object o) {
>        WordOffset that = (WordOffset)o;  
>        int f = this.fileName.compareTo(that.fileName);  
>        if(f == 0) {  
>          return (int)Math.signum((double)(this.offset - that.offset));  
>        }  
>        return f;  
>      }  
>      @Override 
>      public boolean equals(Object obj) {  
>        if(obj instanceof WordOffset)  
>          return this.compareTo(obj) == 0;
>        return false;  
>      }  
>      @Override 
>      public int hashCode() {  
>      assert false : "hashCode not designed";  
>      return 42; //an arbitrary constant  
>      }  
>    }  
>    /**  
>     * To use {@link CombineFileInputFormat}, one should extend it, to return a   
>     * (custom) {@link RecordReader}. CombineFileInputFormat uses   
>     * {@link CombineFileSplit}s.   
>     */ 
>    public static class MyInputFormat
>      extends CombineFileInputFormat<WordOffset, Text> {
>      public RecordReader<WordOffset, Text> createRecordReader(InputSplit split,
>        TaskAttemptContext context) throws IOException {
>        return new CombineFileRecordReader<WordOffset, Text>(
>          (CombineFileSplit)split, context, CombineFileLineRecordReader.class);
>      }
>    }
>   
>    /**  
>     * RecordReader is responsible for extracting records from a chunk
>     * of the CombineFileSplit.   
>     */ 
>    public static class CombineFileLineRecordReader
>      extends RecordReader<WordOffset, Text> {
>      private long startOffset; //offset of the chunk;  
>      private long end; //end of the chunk;  
>      private long pos; // current pos   
>      private FileSystem fs;  
>      private Path path;  
>      private WordOffset key;  
>      private Text value;  
>      private FSDataInputStream fileIn;  
>      private LineReader reader;  
>      public CombineFileLineRecordReader(CombineFileSplit split,  
>          TaskAttemptContext context, Integer index) throws IOException {  
>       
>        this.path = split.getPath(index);  
>        fs = this.path.getFileSystem(context.getConfiguration());  
>        this.startOffset = split.getOffset(index);  
>        this.end = startOffset + split.getLength(index);  
>        boolean skipFirstLine = false;  
>        //open the file  
>        fileIn = fs.open(path);  
>        if (startOffset != 0) {  
>          skipFirstLine = true;
>          --startOffset;  
>          fileIn.seek(startOffset);  
>        }  
>        reader = new LineReader(fileIn);  
>        if (skipFirstLine) {  // skip first line and re-establish "startOffset".  
>         startOffset += reader.readLine(new Text(), 0,  
>          (int)Math.min((long)Integer.MAX_VALUE, end - startOffset));  
>        }  
>        this.pos = startOffset;  
>      }  
>      public void initialize(InputSplit split, TaskAttemptContext context)  
>         throws IOException, InterruptedException {  
>      }
>      public void close() throws IOException { }  
>      public float getProgress() throws IOException {
>        if (startOffset == end) {
>          return 0.0f;
>        } else {
>          return Math.min(1.0f, (pos - startOffset) / (float)(end - startOffset));
>        }
>      }
>      public boolean nextKeyValue() throws IOException {
>        if (key == null) {
>          key = new WordOffset();
>          key.fileName = path.getName();
>        }
>        key.offset = pos;
>        if (value == null) {
>          value = new Text();
>        }
>        int newSize = 0;
>        if (pos < end) {
>          newSize = reader.readLine(value);
>          pos += newSize;
>        }
>        if (newSize == 0) {
>          key = null;
>          value = null;
>          return false;
>        } else {
>          return true;
>        }
>      }
>      public WordOffset getCurrentKey()
>          throws IOException, InterruptedException {
>        return key;
>      }
>      public Text getCurrentValue() throws IOException, InterruptedException {
>        return value;
>      }
>    }  
>   
>    /**  
>     * This Mapper is similar to the one in {@link WordCount.MapClass}.  
>     */ 
>    public static class MapClass extends
>        Mapper<WordOffset, Text, Text, IntWritable> {
>      private final static IntWritable one = new IntWritable(1);  
>      private Text word = new Text();  
>      public void map(WordOffset key, Text value, Context context)  
>          throws IOException, InterruptedException {  
>        String line = value.toString();  
>        StringTokenizer itr = new StringTokenizer(line);  
>        while (itr.hasMoreTokens()) {  
>          word.set(itr.nextToken());  
>          context.write(word, one);  
>        }  
>      }  
>    }  
>    private void printUsage() {  
>      System.out.println("Usage : multifilewc <input_dir> <output_dir>");
>    }  
>    public int run(String[] args) throws Exception {  
>      if(args.length < 2) {  
>        printUsage();  
>        return 2;  
>      }  
>      Job job = new Job(getConf());  
>      job.setJobName("MultiFileWordCount");  
>      job.setJarByClass(MultiFileWordCount.class);  
>     
>      //set the InputFormat of the job to our InputFormat  
>      job.setInputFormatClass(MyInputFormat.class);  
>        
>      // the keys are words (strings)  
>      job.setOutputKeyClass(Text.class);  
>      // the values are counts (ints)  
>      job.setOutputValueClass(IntWritable.class);  
>  
>      //use the defined mapper  
>      job.setMapperClass(MapClass.class);  
>      //use the WordCount Reducer  
>      job.setCombinerClass(IntSumReducer.class);  
>      job.setReducerClass(IntSumReducer.class);  
>    
>      FileInputFormat.addInputPaths(job, args[0]);  
>      FileOutputFormat.setOutputPath(job, new Path(args[1]));  
>      return job.waitForCompletion(true) ? 0 : 1;  
>    }  
>    
>    public static void main(String[] args) throws Exception {  
>      int ret = ToolRunner.run(new MultiFileWordCount(), args);  
>      System.exit(ret);  
>    }  
> }



--
This message was sent by Atlassian JIRA
(v6.1#6144)
