From: QiangCai
To: issues@carbondata.apache.org
Reply-To: issues@carbondata.apache.org
Subject: [GitHub] carbondata pull request #842: [CARBONDATA-937] Data loading for partition ta...
Date: Thu, 11 May 2017 16:24:21 +0000 (UTC)

Github user QiangCai commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/842#discussion_r116036488

--- Diff: integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala ---
@@ -931,6 +954,82 @@ object CarbonDataRDDFactory {
     }
 
+  /**
+   * Repartition the input data for the partition table.
+   * @param sqlContext SQLContext used to build the input RDD
+   * @param dataFrame optional input data; when empty, data is read from the CSV fact files
+   * @param carbonLoadModel load model describing the target table and the input format
+   * @return the input rows repartitioned by the partition column
+   */
+  private def repartitionInputData(sqlContext: SQLContext,
+      dataFrame: Option[DataFrame],
+      carbonLoadModel: CarbonLoadModel): RDD[Row] = {
+    val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+    val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
+    val partitionColumn = partitionInfo.getColumnSchemaList.get(0).getColumnName
+    val partitionColumnDataType = partitionInfo.getColumnSchemaList.get(0).getDataType
+    val columns = carbonLoadModel.getCsvHeaderColumns
+    var partitionColumnIndex = -1
+    for (i <- 0 until columns.length) {
+      if (partitionColumn.equals(columns(i))) {
+        partitionColumnIndex = i
+      }
+    }
+    if (partitionColumnIndex == -1) {
+      throw new DataLoadingException("Partition column not found.")
+    }
+    // generate RDD[(K, V)] to use the partitionBy method of PairRDDFunctions
+    val inputRDD: RDD[(String, Row)] = if (dataFrame.isDefined) {
+      // input data from DataFrame
+      val timestampFormatString = CarbonProperties.getInstance().getProperty(CarbonCommonConstants
+        .CARBON_TIMESTAMP_FORMAT, CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+      val timeStampFormat = new SimpleDateFormat(timestampFormatString)
+      val dateFormatString = CarbonProperties.getInstance().getProperty(CarbonCommonConstants
+        .CARBON_DATE_FORMAT, CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
+      val dateFormat = new SimpleDateFormat(dateFormatString)
+      val delimiterLevel1 = carbonLoadModel.getComplexDelimiterLevel1
+      val delimiterLevel2 = carbonLoadModel.getComplexDelimiterLevel2
+      val serializationNullFormat =
+        carbonLoadModel.getSerializationNullFormat.split(CarbonCommonConstants.COMMA, 2)(1)
+      dataFrame.get.rdd.map { row =>
+        (CarbonScalaUtil.getString(row.get(partitionColumnIndex), serializationNullFormat,
+          delimiterLevel1, delimiterLevel2, timeStampFormat, dateFormat), row)
+      }
+    } else {
+      // input data from files
+      val hadoopConfiguration = new Configuration()
+      CommonUtil.configureCSVInputFormat(hadoopConfiguration, carbonLoadModel)
+      hadoopConfiguration.set(FileInputFormat.INPUT_DIR, carbonLoadModel.getFactFilePath)
+      val columnCount = columns.length
+      new NewHadoopRDD[NullWritable, StringArrayWritable](
+        sqlContext.sparkContext,
+        classOf[CSVInputFormat],
+        classOf[NullWritable],
+        classOf[StringArrayWritable],
+        hadoopConfiguration)
+        .map { currentRow =>
+          val row = new StringArrayRow(new Array[String](columnCount))
+          (currentRow._2.get()(partitionColumnIndex), row.setValues(currentRow._2.get()))
+        }
+    }
+    if (partitionColumnDataType == DataType.STRING) {
+      if (partitionInfo.getPartitionType == PartitionType.RANGE) {
+        inputRDD.map { row => (ByteUtil.toBytes(row._1), row._2) }
+          .partitionBy(PartitionFactory.getPartitioner(partitionInfo))
--- End diff --

fixed
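For readers skimming the diff, the core idea is the standard Spark pattern of keying an RDD by the partition column and shuffling it with PairRDDFunctions.partitionBy, so that all rows with the same partition value land in the same Spark partition. Below is a minimal, self-contained sketch of that pattern; the KeyHashPartitioner class and the sample rows are invented for illustration and merely stand in for CarbonData's PartitionFactory.getPartitioner(partitionInfo) and the real Row data:

import org.apache.spark.{Partitioner, SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

// Hypothetical stand-in for PartitionFactory.getPartitioner(partitionInfo):
// routes each key to a bucket by a non-negative hash.
class KeyHashPartitioner(override val numPartitions: Int) extends Partitioner {
  override def getPartition(key: Any): Int = {
    val mod = key.hashCode % numPartitions
    if (mod < 0) mod + numPartitions else mod
  }
}

object PartitionBySketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("partitionBy-sketch").setMaster("local[2]"))

    // RDD[(K, V)]: each row keyed by its partition-column value,
    // mirroring the RDD[(String, Row)] built in the diff above.
    val keyed: RDD[(String, Seq[String])] = sc.parallelize(Seq(
      ("2017-05-01", Seq("2017-05-01", "a", "1")),
      ("2017-05-02", Seq("2017-05-02", "b", "2")),
      ("2017-05-01", Seq("2017-05-01", "c", "3"))))

    // partitionBy (from PairRDDFunctions) shuffles the data so that equal
    // keys land in the same Spark partition; the keys are then dropped.
    val repartitioned: RDD[Seq[String]] =
      keyed.partitionBy(new KeyHashPartitioner(2)).map(_._2)

    // glom() collects each partition into an array so the layout is visible.
    repartitioned.glom().collect().zipWithIndex.foreach { case (rows, i) =>
      println(s"partition $i: ${rows.mkString("; ")}")
    }
    sc.stop()
  }
}

One note on the lines under review: for RANGE partitioning on a STRING column the diff re-keys the RDD with ByteUtil.toBytes before calling partitionBy, presumably so the range partitioner compares keys by their byte representation rather than as Java strings.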