hudi-commits mailing list archives

From GitBox <...@apache.org>
Subject [GitHub] [hudi] danny0405 commented on a change in pull request #3134: [HUDI-2052] Support load logFile in BootstrapFunction
Date Thu, 24 Jun 2021 03:38:04 GMT

danny0405 commented on a change in pull request #3134:
URL: https://github.com/apache/hudi/pull/3134#discussion_r657601079



##########
File path: hudi-flink/src/main/java/org/apache/hudi/sink/bootstrap/BootstrapFunction.java
##########
@@ -168,33 +178,74 @@ private HoodieFlinkTable getTable() {
    * @param partitionPath The partition path
    */
   @SuppressWarnings("unchecked")
-  private void loadRecords(String partitionPath, Collector<O> out) {
+  private void loadRecords(String partitionPath, Collector<O> out) throws Exception {
     long start = System.currentTimeMillis();
+
     BaseFileUtils fileUtils = BaseFileUtils.getInstance(this.hoodieTable.getBaseFileFormat());
-    List<HoodieBaseFile> latestBaseFiles =
-        HoodieIndexUtils.getLatestBaseFilesForPartition(partitionPath, this.hoodieTable);
-    LOG.info("All baseFile in partition {} size = {}", partitionPath, latestBaseFiles.size());
+    Schema schema = new TableSchemaResolver(this.hoodieTable.getMetaClient()).getTableAvroSchema();
 
     final int parallelism = getRuntimeContext().getNumberOfParallelSubtasks();
     final int maxParallelism = getRuntimeContext().getMaxNumberOfParallelSubtasks();
     final int taskID = getRuntimeContext().getIndexOfThisSubtask();
-    for (HoodieBaseFile baseFile : latestBaseFiles) {
-      boolean shouldLoad = KeyGroupRangeAssignment.assignKeyToParallelOperator(
-          baseFile.getFileId(), maxParallelism, parallelism) == taskID;
-
-      if (shouldLoad) {
-        LOG.info("Load records from file {}.", baseFile);
-        final List<HoodieKey> hoodieKeys;
-        try {
-          hoodieKeys =
-              fileUtils.fetchRecordKeyPartitionPath(this.hadoopConf, new Path(baseFile.getPath()));
-        } catch (Exception e) {
-          throw new HoodieException(String.format("Error when loading record keys from file: %s", baseFile), e);
-        }
 
-        for (HoodieKey hoodieKey : hoodieKeys) {
-          out.collect((O) new IndexRecord(generateHoodieRecord(hoodieKey, baseFile)));
-        }
+    Option<HoodieInstant> latestCommitTime = this.hoodieTable.getMetaClient().getCommitsTimeline()
+        .filterCompletedInstants().lastInstant();
+
+    if (latestCommitTime.isPresent()) {
+      List<FileSlice> fileSlices = this.hoodieTable.getSliceView()
+          .getLatestFileSlicesBeforeOrOn(partitionPath, latestCommitTime.get().getTimestamp(), true)
+          .collect(toList());
+
+      for (FileSlice fileSlice : fileSlices) {
+        // load parquet records
+        fileSlice.getBaseFile().ifPresent(baseFile -> {
+          // filter out corrupted files
+          if (baseFile.getFileSize() <= 0) {
+            return;
+          }
+
+          if (shouldLoadFileId(baseFile.getFileId(), maxParallelism, parallelism, taskID)) {
+            LOG.info("Load records from file {}.", baseFile);
+            final List<HoodieKey> hoodieKeys;
+            try {
+              hoodieKeys =
+                  fileUtils.fetchRecordKeyPartitionPath(this.hadoopConf, new Path(baseFile.getPath()));
+            } catch (Exception e) {
+              throw new HoodieException(String.format("Error when loading record keys from file: %s", baseFile), e);
+            }
+
+            for (HoodieKey hoodieKey : hoodieKeys) {
+              out.collect((O) new IndexRecord(generateHoodieRecord(hoodieKey, baseFile)));
+            }
+          }
+        });
+
+        // load avro log records
+        fileSlice.getLogFiles().forEach(logFile -> {
+          // filter out corrupted files

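The rewritten loop calls a shouldLoadFileId helper whose body lies outside this hunk. Judging from the removed lines, it presumably just extracts the inline key-group check; a minimal sketch under that assumption (the body below is a reconstruction, not the PR's actual code):

    // Hypothetical reconstruction of the helper invoked above: route each file id
    // to exactly one subtask via Flink's key-group assignment, i.e. the same
    // KeyGroupRangeAssignment check that the removed lines performed inline.
    private static boolean shouldLoadFileId(String fileId, int maxParallelism, int parallelism, int taskID) {
      return KeyGroupRangeAssignment.assignKeyToParallelOperator(fileId, maxParallelism, parallelism) == taskID;
    }
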
Review comment:
       You can read the logs all together with the scanner.
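
The scanner mentioned here is presumably org.apache.hudi.common.table.log.HoodieMergedLogRecordScanner, which merge-reads every log file of a file slice in a single pass instead of iterating them one by one. A minimal sketch of that approach inside loadRecords, assuming the scanner's builder API in this Hudi version; the buffer size, memory limit, and spill path are illustrative placeholders, not values from this PR:

    // Merge-read all log files of the slice together instead of per file.
    String basePath = this.hoodieTable.getMetaClient().getBasePath();
    List<String> logPaths = fileSlice.getLogFiles()
        .map(logFile -> logFile.getPath().toString())
        .collect(toList());

    HoodieMergedLogRecordScanner scanner = HoodieMergedLogRecordScanner.newBuilder()
        .withFileSystem(FSUtils.getFs(basePath, this.hadoopConf))
        .withBasePath(basePath)
        .withLogFilePaths(logPaths)
        .withReaderSchema(schema)
        .withLatestInstantTime(latestCommitTime.get().getTimestamp())
        .withReadBlocksLazily(true)
        .withReverseReader(false)
        .withBufferSize(16 * 1024 * 1024)                      // illustrative value
        .withMaxMemorySizeInBytes(1024 * 1024 * 1024L)         // illustrative value
        .withSpillableMapBasePath("/tmp/hudi-bootstrap-spill") // illustrative path
        .build();

    // The merged result is keyed by record key and deduplicated across log files.
    for (String recordKey : scanner.getRecords().keySet()) {
      HoodieKey hoodieKey = new HoodieKey(recordKey, partitionPath);
      // emit an IndexRecord for hoodieKey, as the base-file branch above does
    }
    scanner.close();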




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


