From: sansanichfb
To: dev@hawq.incubator.apache.org
Reply-To: dev@hawq.incubator.apache.org
Subject: [GitHub] incubator-hawq pull request: HAWQ-178: Add JSON plugin support in ...
Message-Id: <20160129003438.A19B9DFFF4@git1-us-west.apache.org>
Date: Fri, 29 Jan 2016 00:34:38 +0000 (UTC)

Github user sansanichfb commented on a diff in the pull request:

    https://github.com/apache/incubator-hawq/pull/302#discussion_r51211063

    --- Diff: pxf/pxf-json/src/main/java/org/apache/hawq/pxf/plugins/json/JsonInputFormat.java ---
    @@ -0,0 +1,226 @@
    +package org.apache.hawq.pxf.plugins.json;
    +
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *   http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +
    +import java.io.BufferedInputStream;
    +import java.io.IOException;
    +import java.security.InvalidParameterException;
    +
    +import org.apache.hadoop.conf.Configuration;
    +import org.apache.hadoop.fs.FSDataInputStream;
    +import org.apache.hadoop.fs.FileSystem;
    +import org.apache.hadoop.fs.Path;
    +import org.apache.hadoop.io.LongWritable;
    +import org.apache.hadoop.io.NullWritable;
    +import org.apache.hadoop.io.Text;
    +import org.apache.hadoop.mapred.FileInputFormat;
    +import org.apache.hadoop.mapred.FileSplit;
    +import org.apache.hadoop.mapred.InputSplit;
    +import org.apache.hadoop.mapred.JobConf;
    +import org.apache.hadoop.mapred.LineRecordReader;
    +import org.apache.hadoop.mapred.RecordReader;
    +import org.apache.hadoop.mapred.Reporter;
    +import org.apache.log4j.Logger;
    +import org.codehaus.jackson.JsonFactory;
    +import org.codehaus.jackson.JsonNode;
    +import org.codehaus.jackson.JsonParseException;
    +import org.codehaus.jackson.map.JsonMappingException;
    +import org.codehaus.jackson.map.ObjectMapper;
    +
    +public class JsonInputFormat extends FileInputFormat<Text, NullWritable> {
    +
    +    private static JsonFactory factory = new JsonFactory();
    +    private static ObjectMapper mapper = new ObjectMapper(factory);
    +
    +    public static final String ONE_RECORD_PER_LINE = "json.input.format.one.record.per.line";
    +    public static final String RECORD_IDENTIFIER = "json.input.format.record.identifier";
    +
    +    @Override
    +    public RecordReader<Text, NullWritable> getRecordReader(InputSplit split, JobConf conf, Reporter reporter)
    +            throws IOException {
    +
    +        if (conf.getBoolean(ONE_RECORD_PER_LINE, false)) {
    +
    +            return new SimpleJsonRecordReader(conf, (FileSplit) split);
    +        } else {
    +            return new JsonRecordReader(conf, (FileSplit) split);
    +        }
    +    }
    +
    +    public static class SimpleJsonRecordReader implements RecordReader<Text, NullWritable> {
    +
    +        private LineRecordReader rdr = null;
    +        private LongWritable key = new LongWritable();
    +        private Text value = new Text();
    +
    +        public SimpleJsonRecordReader(Configuration conf, FileSplit split) throws IOException {
    +            rdr = new LineRecordReader(conf, split);
    +        }
    +
    +        @Override
    +        public void close() throws IOException {
    +            rdr.close();
    +        }
    +
    +        @Override
    +        public Text createKey() {
    +            return value;
    +        }
    +
    +        @Override
    +        public NullWritable createValue() {
    +            return NullWritable.get();
    +        }
    +
    +        @Override
    +        public long getPos() throws IOException {
    +            return rdr.getPos();
    +        }
    +
    +        @Override
    +        public boolean next(Text key, NullWritable value) throws IOException {
    +            if (rdr.next(this.key, this.value)) {
    +                key.set(this.value);
    +                return true;
    +            } else {
    +                return false;
    +            }
    +        }
    +
    +        @Override
    +        public float getProgress() throws IOException {
    +            return rdr.getProgress();
    +        }
    +    }
    +
    +    public static class JsonRecordReader implements RecordReader<Text, NullWritable> {
    +
    +        private Logger LOG = Logger.getLogger(JsonRecordReader.class);
    +
    +        private JsonStreamReader rdr = null;
    +        private long start = 0, end = 0;
    +        private float toRead = 0;
    +        private String identifier = null;
    +        private Logger log = Logger.getLogger(JsonRecordReader.class);
    +
    +        public JsonRecordReader(JobConf conf, FileSplit split) throws IOException {
    +            log.info("JsonRecordReader constructor called. Conf is " + conf + ". Split is " + split);
Split is " + split); + this.identifier = conf.get(RECORD_IDENTIFIER); + log.info("Identifier is " + this.identifier); + + if (this.identifier == null || identifier.isEmpty()) { + throw new InvalidParameterException(JsonInputFormat.RECORD_IDENTIFIER + " is not set."); + } else { + LOG.info("Initializing JsonRecordReader with identifier " + identifier); + } + + // get relevant data + Path file = split.getPath(); + + log.info("File is " + file); + + start = split.getStart(); + end = start + split.getLength(); + toRead = end - start; + log.info("FileSystem is " + FileSystem.get(conf)); + + FSDataInputStream strm = FileSystem.get(conf).open(file); + + log.info("Retrieved file stream "); + + if (start != 0) { + strm.seek(start); + } + + rdr = new JsonStreamReader(identifier, new BufferedInputStream(strm)); + + log.info("Reader is " + rdr); + } + + @Override + public boolean next(Text key, NullWritable value) throws IOException { + + boolean retval = false; + boolean keepGoing = false; + do { + // Exit condition (end of block/file) + if (rdr.getBytesRead() >= (end - start)) { + return false; + } + + keepGoing = false; + String record = rdr.getJsonRecord(); + if (record != null) { + if (JsonInputFormat.decodeLineToJsonNode(record) == null) { + log.error("Unable to parse JSON string. Skipping. DEBUG to see"); + log.debug(record); + keepGoing = true; + } else { + key.set(record); + retval = true; + } + } + } while (keepGoing); + + return retval; + } + + @Override + public Text createKey() { + return new Text(); + } + + @Override + public NullWritable createValue() { + return NullWritable.get(); + } + + @Override + public long getPos() throws IOException { + return start + rdr.getBytesRead(); + } + + @Override + public void close() throws IOException { + rdr.close(); + } + + @Override + public float getProgress() throws IOException { + return (float) rdr.getBytesRead() / toRead; + } + } + + public static synchronized JsonNode decodeLineToJsonNode(String line) { + + try { + return mapper.readTree(line); + } catch (JsonParseException e) { + e.printStackTrace(); --- End diff -- Using log.debug instead of? --- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA. ---