Subject: Re: Interface expected in the map definition?
From: "sreebalineni ."
To: user@hadoop.apache.org
Date: Thu, 3 Sep 2015 11:18:19 +0530

That must be jar files missing in classpath.
check this link http://stackoverflow.com/questions/15127082/running-map-reduce-job-on-cdh4-example

On Wed, Sep 2, 2015 at 8:57 PM, xeonmailinglist <xeonmailinglist@gmail.com> wrote:

I am setting up my wordcount example, which is very similar to the WordCount example that we find on the Internet.

  1. The MyMap class extends and implements the same classes as the ones defined in the original wordcount example, but in my case I get the error “Interface expected here”. I really don’t understand why I get this error. See my example below [1]. Any help here?

  2. Is it possible to access the JobConf variable inside the map or reduce methods?
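
A minimal sketch for question 2, assuming the stock Hadoop APIs (the ConfAwareMapper class below is only illustrative; the property name is the one set in the example's main()): in the new org.apache.hadoop.mapreduce API, the Context passed to setup() and map() exposes the job configuration via context.getConfiguration(). In the old org.apache.hadoop.mapred API, the equivalent hook is configure(JobConf), which MapReduceBase lets a mapper or reducer override.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // Sketch only: a new-API mapper that reads values from the job Configuration.
    public class ConfAwareMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        private Configuration conf;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // The Context gives access to the job's Configuration (the new-API
            // counterpart of the old JobConf).
            conf = context.getConfiguration();
        }

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // "mapreduce.job.map.identity.class" is the custom property set in main()
            // of the example below; any key set on the job can be read the same way.
            String identityMapper = conf.get("mapreduce.job.map.identity.class", "not set");
            context.write(new Text(identityMapper), new IntWritable(1));
        }
    }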


[1] My Wordcount example

package org.apache.hadoop.mapred.examples;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;

/**
 * My example of a common wordcount. Compare with the official WordCount.class to understand the differences between both classes.
 */
public class MyWordCount {

    public static class MyMap extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> { // <<<< Interface expected here!!!
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    public static class MyReducer
            extends Reducer<Text,IntWritable,Text,IntWritable> {
        private IntWritable result = new IntWritable();
        MedusaDigests parser = new MedusaDigests();

        public void reduce(Text key, Iterable<IntWritable> values,
                           Context context
        ) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                System.out.println(" - key ( " + key.getClass().toString() + "): " + key.toString()
                        + " value ( " + val.getClass().toString() + " ): " + val.toString());
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }

        public void run(Context context) throws IOException, InterruptedException {
            setup(context);
            try {
                while (context.nextKey()) {
                    System.out.println("Key: " + context.getCurre=
ntKey());
                    reduce(context.getCurrentKey(), context.getValues(), co=
ntext);
                    // If a back up store is used, reset it
                    Iterator<IntWritable> iter =3D context.getValues(=
).iterator();
                    if(iter instanceof ReduceContext.ValueIterator) {
                        ((ReduceContext.ValueIterator<IntWritable>)it=
er).resetBackupStore();
                    }
                }
            } finally {
                cleanup(context);
            }
        }

        protected void cleanup(Context context)
                throws IOException, InterruptedException {
            parser.cleanup(context);
        }
    }

    /** Identity mapper set by the user. */
    public static class MyFullyIndentityMapper
            extends Mapper<Object, Text, Text, IntWritable>{

        private Text word = new Text();
        private IntWritable val = new IntWritable();

        public void map(Object key, Text value, Context context
        ) throws IOException, InterruptedException {

            StringTokenizer itr = new StringTokenizer(value.toString());
            word.set(itr.nextToken());
            val.set(Integer.valueOf(itr.nextToken()));
            context.write(word, val);
        }

        public void run(Context context) throws IOException, InterruptedException {
            setup(context);
            try {
                while (context.nextKeyValue()) {
                    System.out.println("Key ( " + context.getCurr=
entKey().getClass().getName() + " ): " + context.getCurrentKey()
                            + " Value (" + context.getCurrentValu=
e().getClass().getName() + "): " + context.getCurrentValue());
                    map(context.getCurrentKey(), context.getCurrentValue(),=
 context);
                }
            } finally {
                cleanup(context);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        GenericOptionsParser parser = new GenericOptionsParser(new Configuration(), args);

        String[] otherArgs = parser.getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: wordcount [<in>...] <o=
ut>");
            System.exit(2);
        }

        // path that contains the file with all attributes necessary to the execution of the job
        Medusa execution = new Medusa(args);

        // first map tasks
        JobConf conf = new JobConf(MyWordCount.class);
        conf.setJobName("wordcount");
        conf.setClass("mapreduce.job.map.identity.class", MyFully=
IndentityMapper.class, Mapper.class);
        System.out.println(conf.toString());

        conf.setJarByClass(MyWordCount.class);
        conf.setMapperClass(MyMap.class);
        conf.setPartitionerClass(MyHashPartitioner.class);
        conf.setReducerClass(MyReducer.class);
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        conf.setNumReduceTasks(1);

        List<Path> inputPaths = new ArrayList<Path>();
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            inputPaths.add(new Path(otherArgs[i]));
        }
        Path outputPath = new Path(otherArgs[otherArgs.length - 1]);
        execution.setInputPath(inputPaths);
        execution.setOutputPath(outputPath);

        // launch the job directly
        execution.submit(new Job(conf));
    }
}
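
For comparison with the MyMap declaration above, a minimal sketch of the mapper from the stock old-API WordCount: there Mapper is the org.apache.hadoop.mapred.Mapper interface (so "extends MapReduceBase implements Mapper<...>" is accepted), while org.apache.hadoop.mapreduce.Mapper, which the example imports explicitly, is a class. The OldApiWordCountMap class name below is only illustrative.

    import java.io.IOException;
    import java.util.StringTokenizer;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;          // the old-API Mapper interface
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    // Sketch of the old-API mapper declaration: MapReduceBase supplies the
    // configure()/close() defaults, and Mapper here is an interface.
    public class OldApiWordCountMap extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private final Text word = new Text();

        public void map(LongWritable key, Text value,
                        OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            StringTokenizer tokenizer = new StringTokenizer(value.toString());
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }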