pinot-commits mailing list archives

From: xian...@apache.org
Subject: [pinot] branch master updated: Fixing code style for pinot-plugins (#7314)
Date: Wed, 18 Aug 2021 10:14:02 GMT
This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/pinot.git


The following commit(s) were added to refs/heads/master by this push:
     new ae83527  Fixing code style for pinot-plugins (#7314)
ae83527 is described below

commit ae835271fb1a3cc76920fae1e1e49604b4812a0f
Author: Xiang Fu <xiangfu.1024@gmail.com>
AuthorDate: Wed Aug 18 03:13:46 2021 -0700

    Fixing code style for pinot-plugins (#7314)
    
    * Fix pinot-plugins code styling
    
    * fixing code style for pinot-minion-builtin-tasks
    
    * reformat
---
 config/suppressions.xml                            |  10 +
 .../pinot-batch-ingestion-common/pom.xml           |   3 -
 .../batch/common/SegmentGenerationJobUtils.java    |   2 +
 .../pinot-batch-ingestion-hadoop/pom.xml           |   3 -
 .../batch/hadoop/HadoopSegmentCreationMapper.java  |   3 +-
 .../pinot-batch-ingestion-spark/pom.xml            |   3 -
 .../pinot-batch-ingestion-standalone/pom.xml       |   3 -
 .../ingestion/batch/standalone/JobUtils.java       |   2 +
 .../v0_deprecated/pinot-hadoop/pom.xml             |   3 -
 .../pinot/hadoop/PinotHadoopJobLauncher.java       |   5 +-
 .../pinot/hadoop/io/CombineAvroKeyInputFormat.java |   3 +-
 .../apache/pinot/hadoop/io/PinotOutputFormat.java  |   8 +-
 .../apache/pinot/hadoop/io/PinotRecordWriter.java  |   7 +-
 .../pinot/hadoop/job/HadoopSegmentCreationJob.java |   7 +-
 .../hadoop/job/HadoopSegmentPreprocessingJob.java  |  37 +--
 .../pinot/hadoop/job/InternalConfigConstants.java  |   3 +
 .../job/mappers/AvroDataPreprocessingMapper.java   |  14 +-
 .../job/mappers/OrcDataPreprocessingMapper.java    |  14 +-
 .../hadoop/job/mappers/SegmentCreationMapper.java  |  57 ++--
 .../job/mappers/SegmentPreprocessingMapper.java    |   9 +-
 .../AvroDataPreprocessingPartitioner.java          |  15 +-
 .../job/partitioners/GenericPartitioner.java       |   3 +-
 .../OrcDataPreprocessingPartitioner.java           |  14 +-
 .../preprocess/AvroDataPreprocessingHelper.java    |   6 +-
 .../job/preprocess/DataPreprocessingHelper.java    |  14 +-
 .../preprocess/DataPreprocessingHelperFactory.java |   9 +-
 .../job/preprocess/OrcDataPreprocessingHelper.java |  13 +-
 .../utils/PinotHadoopJobPreparationHelper.java     |   3 +
 .../utils/preprocess/DataPreprocessingUtils.java   |   7 +-
 .../pinot/hadoop/utils/preprocess/OrcUtils.java    |   3 +-
 .../hadoop/data/IngestionSchemaValidatorTest.java  |  74 ++---
 .../pinot/hadoop/io/PinotOutputFormatTest.java     |   9 +-
 .../v0_deprecated/pinot-ingestion-common/pom.xml   |   3 -
 .../pinot/ingestion/common/JobConfigConstants.java |   3 +
 .../ingestion/utils/JobPreparationHelper.java      |   3 +
 .../v0_deprecated/pinot-spark/pom.xml              |   3 -
 .../apache/pinot/spark/PinotSparkJobLauncher.java  |   5 +-
 .../spark/jobs/SparkSegmentCreationFunction.java   |  48 ++--
 .../pinot/spark/jobs/SparkSegmentCreationJob.java  |  16 +-
 .../pinot/spark/jobs/SparkSegmentTarPushJob.java   |   9 +-
 .../pinot/spark/jobs/SparkSegmentUriPushJob.java   |   3 +-
 .../utils/PinotSparkJobPreparationHelper.java      |   6 +-
 .../pinot/spark}/SegmentCreationSparkTest.java     |   7 +-
 .../plugin/provider/AzureEnvironmentProvider.java  |  36 +--
 .../provider/AzureEnvironmentProviderTest.java     |  55 ++--
 pinot-plugins/pinot-environment/pom.xml            |   3 -
 pinot-plugins/pinot-file-system/pinot-adls/pom.xml |   3 -
 .../pinot/plugin/filesystem/ADLSGen2PinotFS.java   | 117 ++++----
 .../pinot/plugin/filesystem/AzurePinotFS.java      |   4 +-
 .../pinot/plugin/filesystem/AzurePinotFSUtil.java  |   6 +-
 .../filesystem/test/ADLSGen2PinotFSTest.java       |  62 ++--
 .../plugin/filesystem/test/AzurePinotFSTest.java   |   3 +-
 .../filesystem/test/AzurePinotFSUtilTest.java      |  18 +-
 .../pinot-adls/src/test/resources/log4j2.xml       |   2 +-
 pinot-plugins/pinot-file-system/pinot-gcs/pom.xml  |   3 -
 .../apache/pinot/plugin/filesystem/GcsPinotFS.java |  43 +--
 .../org/apache/pinot/plugin/filesystem/GcsUri.java |  11 +-
 .../pinot/plugin/filesystem/TestGcsPinotFS.java    | 315 ++++++++++-----------
 .../apache/pinot/plugin/filesystem/TestGcsUri.java |  73 ++---
 pinot-plugins/pinot-file-system/pinot-hdfs/pom.xml |   3 -
 .../pinot/plugin/filesystem/HadoopPinotFS.java     |  16 +-
 pinot-plugins/pinot-file-system/pinot-s3/pom.xml   |   3 -
 .../apache/pinot/plugin/filesystem/S3PinotFS.java  |  38 +--
 .../pinot/plugin/filesystem/S3PinotFSTest.java     |  70 ++---
 .../pinot/plugin/filesystem/S3TestUtils.java       |   2 +
 .../pinot-input-format/pinot-avro-base/pom.xml     |   3 -
 .../avro/AvroIngestionSchemaValidator.java         |  32 +--
 .../plugin/inputformat/avro/AvroSchemaUtil.java    |   3 +
 .../pinot/plugin/inputformat/avro/AvroUtils.java   |  62 ++--
 .../avro/AvroRecordExtractorComplexTypesTest.java  |  83 +++---
 .../inputformat/avro/AvroRecordExtractorTest.java  |  17 +-
 .../avro/AvroRecordToPinotRowGeneratorTest.java    |   6 +-
 .../plugin/inputformat/avro/AvroUtilsTest.java     | 102 +++----
 .../pinot-input-format/pinot-avro/pom.xml          |   3 -
 .../inputformat/avro/KafkaAvroMessageDecoder.java  |  51 ++--
 .../pinot-confluent-avro/pom.xml                   |   3 -
 ...aConfluentSchemaRegistryAvroMessageDecoder.java |  17 +-
 pinot-plugins/pinot-input-format/pinot-csv/pom.xml |   3 -
 .../inputformat/csv/CSVRecordExtractorConfig.java  |   3 +-
 .../inputformat/csv/CSVRecordExtractorTest.java    |   5 +-
 .../inputformat/csv/CSVRecordReaderTest.java       |  11 +-
 .../pinot-input-format/pinot-json/pom.xml          |   3 -
 .../inputformat/json/JSONMessageDecoder.java       |   3 +-
 .../inputformat/json/JSONRecordExtractorTest.java  |   5 +-
 .../inputformat/json/JSONRecordReaderTest.java     |   6 +-
 pinot-plugins/pinot-input-format/pinot-orc/pom.xml |   3 -
 .../plugin/inputformat/orc/ORCRecordReader.java    |  19 +-
 .../inputformat/orc/ORCRecordExtractorTest.java    |  80 ++----
 .../inputformat/orc/ORCRecordReaderTest.java       |   8 +-
 .../pinot-input-format/pinot-parquet/pom.xml       |   3 -
 .../parquet/ParquetNativeRecordExtractor.java      |  13 +-
 .../parquet/ParquetNativeRecordReader.java         |   8 +-
 .../plugin/inputformat/parquet/ParquetUtils.java   |  10 +-
 .../parquet/ParquetRecordReaderTest.java           |   6 +-
 .../pinot-input-format/pinot-protobuf/pom.xml      |   3 -
 .../inputformat/protobuf/ProtoBufFieldInfo.java    |   2 +-
 .../protobuf/ProtoBufRecordExtractor.java          |  12 +-
 .../inputformat/protobuf/ProtoBufRecordReader.java |   3 +-
 .../protobuf/ProtoBufRecordExtractorTest.java      |  43 +--
 .../protobuf/ProtoBufRecordReaderTest.java         |  10 +-
 .../pinot-protobuf/src/test/resources/log4j2.xml   |   2 +-
 .../pinot-input-format/pinot-thrift/pom.xml        |   3 -
 .../inputformat/thrift/ThriftRecordExtractor.java  |   2 +-
 .../inputformat/thrift/ThriftRecordReader.java     |   3 +-
 .../thrift/ThriftRecordExtractorTest.java          |  57 ++--
 .../inputformat/thrift/ThriftRecordReaderTest.java |   9 +-
 .../pinot-thrift/src/test/resources/log4j2.xml     |   2 +-
 .../pinot-metrics/pinot-dropwizard/pom.xml         |   3 -
 pinot-plugins/pinot-metrics/pinot-yammer/pom.xml   |   3 -
 .../metrics/yammer/YammerMetricsRegistry.java      |   2 +-
 .../pinot-minion-builtin-tasks/pom.xml             |   3 -
 .../BaseMultipleSegmentsConversionExecutor.java    |  43 ++-
 .../tasks/BaseSingleSegmentConversionExecutor.java |  35 +--
 .../plugin/minion/tasks/BaseTaskExecutor.java      |  10 +-
 .../pinot/plugin/minion/tasks/MergeTaskUtils.java  |  21 +-
 .../minion/tasks/SegmentConversionResult.java      |   3 +-
 .../minion/tasks/SegmentConversionUtils.java       |  40 ++-
 .../ConvertToRawIndexTaskExecutor.java             |  15 +-
 .../ConvertToRawIndexTaskExecutorFactory.java      |   2 +-
 .../ConvertToRawIndexTaskGenerator.java            |  11 +-
 .../MergeRollupTaskExecutor.java                   |  21 +-
 .../MergeRollupTaskExecutorFactory.java            |   2 +-
 .../MergeRollupTaskGenerator.java                  | 127 ++++-----
 .../MergeRollupTaskUtils.java                      |   5 +-
 .../minion/tasks/purge/PurgeTaskExecutor.java      |  18 +-
 .../RealtimeToOfflineSegmentsTaskExecutor.java     |  33 +--
 ...altimeToOfflineSegmentsTaskExecutorFactory.java |   2 +-
 .../RealtimeToOfflineSegmentsTaskGenerator.java    |  68 ++---
 .../SegmentGenerationAndPushResult.java            |   5 +-
 .../SegmentGenerationAndPushTaskExecutor.java      |  47 ++-
 ...egmentGenerationAndPushTaskExecutorFactory.java |   2 +-
 .../SegmentGenerationAndPushTaskGenerator.java     |  47 ++-
 .../SegmentGenerationAndPushTaskUtils.java         |   4 +-
 .../plugin/minion/tasks/MergeTaskUtilsTest.java    |  25 +-
 .../plugin/minion/tasks/TaskRegistryTest.java      |  16 +-
 .../MergeRollupTaskExecutorTest.java               |   8 +-
 .../MergeRollupTaskGeneratorTest.java              | 284 +++++++------------
 .../MergeRollupTaskUtilsTest.java                  |   2 +-
 .../minion/tasks/purge/PurgeTaskExecutorTest.java  |   4 +-
 .../RealtimeToOfflineSegmentsTaskExecutorTest.java | 127 +++------
 ...RealtimeToOfflineSegmentsTaskGeneratorTest.java | 136 ++++-----
 .../SegmentGenerationAndPushTaskGeneratorTest.java |  11 +-
 pinot-plugins/pinot-segment-uploader/pom.xml       |   3 -
 .../pinot-segment-writer-file-based/pom.xml        |   3 -
 .../filebased/FileBasedSegmentWriter.java          |  37 +--
 .../filebased/FileBasedSegmentWriterTest.java      |  88 ++----
 .../pinot-stream-ingestion/pinot-kafka-0.9/pom.xml |   3 -
 .../plugin/stream/kafka09/ConsumerAndIterator.java |   5 +-
 .../plugin/stream/kafka09/KafkaBrokerWrapper.java  |   2 +-
 .../stream/kafka09/KafkaConnectionHandler.java     |  14 +-
 .../stream/kafka09/KafkaConsumerManager.java       |   2 +
 .../stream/kafka09/KafkaHighLevelStreamConfig.java |  20 +-
 .../stream/kafka09/KafkaStreamLevelConsumer.java   |  56 ++--
 .../kafka09/KafkaStreamMetadataProvider.java       |  14 +-
 .../stream/kafka09/SimpleConsumerMessageBatch.java |  14 +-
 .../stream/kafka09/server/KafkaDataProducer.java   |   8 +-
 .../kafka09/server/KafkaDataServerStartable.java   |   2 +-
 .../kafka09/KafkaLowLevelStreamConfigTest.java     |   4 +-
 .../kafka09/KafkaPartitionLevelConsumerTest.java   |  68 ++---
 .../pinot-stream-ingestion/pinot-kafka-2.0/pom.xml |   3 -
 .../plugin/stream/kafka20/KafkaMessageBatch.java   |  14 +-
 .../stream/kafka20/KafkaStreamLevelConsumer.java   |  72 ++---
 .../kafka20/KafkaStreamLevelConsumerManager.java   |   2 +
 .../kafka20/KafkaStreamLevelStreamConfig.java      |  39 +--
 .../stream/kafka20/server/KafkaDataProducer.java   |  14 +-
 .../kafka20/server/KafkaDataServerStartable.java   |  40 +--
 .../pinot-kafka-base/pom.xml                       |   3 -
 .../stream/kafka/KafkaStreamConfigProperties.java  |   3 +
 .../stream/kafka/KafkaJSONMessageDecoderTest.java  |   5 +-
 .../pinot-stream-ingestion/pinot-kinesis/pom.xml   |   3 -
 .../pinot/plugin/stream/kinesis/KinesisConfig.java |   4 +-
 .../kinesis/KinesisPartitionGroupOffset.java       |   8 +-
 .../plugin/stream/kinesis/KinesisConsumerTest.java |  62 ++--
 .../kinesis/KinesisStreamMetadataProviderTest.java |  66 +++--
 .../pinot-stream-ingestion/pinot-pulsar/pom.xml    |   3 -
 .../stream/pulsar/MessageIdStreamOffset.java       |   2 +-
 .../plugin/stream/pulsar/PulsarMessageBatch.java   |  14 +-
 .../stream/pulsar/PulsarStreamLevelConsumer.java   |  30 +-
 .../pulsar/PulsarStreamLevelConsumerManager.java   |   3 +
 .../pulsar/PulsarStreamMetadataProvider.java       |   2 +-
 .../stream/pulsar/PulsarStandaloneCluster.java     |   2 +-
 181 files changed, 1679 insertions(+), 2323 deletions(-)

diff --git a/config/suppressions.xml b/config/suppressions.xml
index 03e6c68..8ea9d1f 100644
--- a/config/suppressions.xml
+++ b/config/suppressions.xml
@@ -30,4 +30,14 @@
 
   <!-- Suppress autogenerated parser files -->
   <suppress checks=".*" files="target/generated-sources"/>
+
+  <!-- Suppress autogenerated test files by thrift compiler -->
+  <suppress checks=".*" files="/thrift/ComplexTypes.java"/>
+  <suppress checks=".*" files="/thrift/NestedType.java"/>
+  <suppress checks=".*" files="/thrift/TestEnum.java"/>
+  <suppress checks=".*" files="/thrift/ThriftSampleData.java"/>
+
+  <!-- Suppress autogenerated test files by protobuf compiler -->
+  <suppress checks=".*" files="/protobuf/ComplexTypes.java"/>
+  <suppress checks=".*" files="/protobuf/Sample.java"/>
 </suppressions>
diff --git a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/pom.xml b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/pom.xml
index 261de1d..190f8c1 100644
--- a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/pom.xml
+++ b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/pom.xml
@@ -36,8 +36,5 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>none</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 </project>
diff --git a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/src/main/java/org/apache/pinot/plugin/ingestion/batch/common/SegmentGenerationJobUtils.java b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/src/main/java/org/apache/pinot/plugin/ingestion/batch/common/SegmentGenerationJobUtils.java
index 6c110d1..b63e7fd 100644
--- a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/src/main/java/org/apache/pinot/plugin/ingestion/batch/common/SegmentGenerationJobUtils.java
+++ b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-common/src/main/java/org/apache/pinot/plugin/ingestion/batch/common/SegmentGenerationJobUtils.java
@@ -23,6 +23,8 @@ import org.apache.pinot.spi.ingestion.batch.spec.SegmentNameGeneratorSpec;
 
 
 public class SegmentGenerationJobUtils implements Serializable {
+  private SegmentGenerationJobUtils() {
+  }
 
   /**
    * Always use local directory sequence id unless explicitly config: "use.global.directory.sequence.id".
diff --git a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/pom.xml b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/pom.xml
index b422933..0be0798 100644
--- a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/pom.xml
+++ b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/pom.xml
@@ -37,9 +37,6 @@
     <pinot.root>${basedir}/../../..</pinot.root>
     <hadoop.version>2.7.0</hadoop.version>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/src/main/java/org/apache/pinot/plugin/ingestion/batch/hadoop/HadoopSegmentCreationMapper.java b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/src/main/java/org/apache/pinot/plugin/ingestion/batch/hadoop/HadoopSegmentCreationMapper.java
index 7efb69a..06d97b6 100644
--- a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/src/main/java/org/apache/pinot/plugin/ingestion/batch/hadoop/HadoopSegmentCreationMapper.java
+++ b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-hadoop/src/main/java/org/apache/pinot/plugin/ingestion/batch/hadoop/HadoopSegmentCreationMapper.java
@@ -57,11 +57,10 @@ import static org.apache.pinot.spi.plugin.PluginManager.PLUGINS_INCLUDE_PROPERTY
 
 
 public class HadoopSegmentCreationMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
+  protected static final Logger LOGGER = LoggerFactory.getLogger(HadoopSegmentCreationMapper.class);
   protected static final String PROGRESS_REPORTER_THREAD_NAME = "pinot-hadoop-progress-reporter";
   protected static final long PROGRESS_REPORTER_JOIN_WAIT_TIME_MS = 5_000L;
 
-  protected final Logger LOGGER = LoggerFactory.getLogger(HadoopSegmentCreationMapper.class);
-
   protected Configuration _jobConf;
   protected SegmentGenerationJobSpec _spec;
   private File _localTempDir;
diff --git a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-spark/pom.xml b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-spark/pom.xml
index 744521d..5ab7653 100644
--- a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-spark/pom.xml
+++ b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-spark/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencies>
diff --git a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/pom.xml b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/pom.xml
index 652809b..e34e6e4 100644
--- a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/pom.xml
+++ b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/src/main/java/org/apache/pinot/plugin/ingestion/batch/standalone/JobUtils.java b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/src/main/java/org/apache/pinot/plugin/ingestion/batch/standalone/JobUtils.java
index 506e38b..57373fb 100644
--- a/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/src/main/java/org/apache/pinot/plugin/ingestion/batch/standalone/JobUtils.java
+++ b/pinot-plugins/pinot-batch-ingestion/pinot-batch-ingestion-standalone/src/main/java/org/apache/pinot/plugin/ingestion/batch/standalone/JobUtils.java
@@ -19,6 +19,8 @@
 package org.apache.pinot.plugin.ingestion.batch.standalone;
 
 public class JobUtils {
+  private JobUtils() {
+  }
 
   public static int getNumThreads(int jobParallelism) {
     int numCores = Math.max(Runtime.getRuntime().availableProcessors(), 1);
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/pom.xml b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/pom.xml
index bebc81c..f05fa40 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/pom.xml
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/pom.xml
@@ -34,9 +34,6 @@
   <properties>
     <pinot.root>${basedir}/../../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <profiles>
     <profile>
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/PinotHadoopJobLauncher.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/PinotHadoopJobLauncher.java
index a8c46d5..3f6c06c 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/PinotHadoopJobLauncher.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/PinotHadoopJobLauncher.java
@@ -29,10 +29,11 @@ import org.apache.pinot.ingestion.jobs.SegmentUriPushJob;
 
 
 public class PinotHadoopJobLauncher {
+  private PinotHadoopJobLauncher() {
+  }
 
   private static final String USAGE = "usage: [job_type] [job.properties]";
-  private static final String SUPPORT_JOB_TYPES =
-      "\tsupport job types: " + Arrays.toString(PinotIngestionJobType.values());
+  private static final String SUPPORT_JOB_TYPES = "\tsupport job types: " + Arrays.toString(PinotIngestionJobType.values());
 
   private static void usage() {
     System.err.println(USAGE);
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/CombineAvroKeyInputFormat.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/CombineAvroKeyInputFormat.java
index a36f8db..b9f64ec 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/CombineAvroKeyInputFormat.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/CombineAvroKeyInputFormat.java
@@ -44,8 +44,7 @@ public class CombineAvroKeyInputFormat<T> extends CombineFileInputFormat<AvroKey
       throws IOException {
     Class cls = AvroKeyRecordReaderWrapper.class;
 
-    return new CombineFileRecordReader<>((CombineFileSplit) split, context,
-        (Class<? extends RecordReader<AvroKey<T>, NullWritable>>) cls);
+    return new CombineFileRecordReader<>((CombineFileSplit) split, context, (Class<? extends RecordReader<AvroKey<T>, NullWritable>>) cls);
   }
 
   public static class AvroKeyRecordReaderWrapper<T> extends CombineFileRecordReaderWrapper<AvroKey<T>, NullWritable> {
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotOutputFormat.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotOutputFormat.java
index 91da8b1..695dccb 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotOutputFormat.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotOutputFormat.java
@@ -98,8 +98,7 @@ public class PinotOutputFormat<T> extends FileOutputFormat<NullWritable, T> {
       //noinspection unchecked
       return (FieldExtractor<T>) conf.getClassByName(conf.get(PinotOutputFormat.FIELD_EXTRACTOR_CLASS)).newInstance();
     } catch (Exception e) {
-      throw new IllegalStateException(
-          "Caught exception while creating instance of field extractor configured with key: " + FIELD_EXTRACTOR_CLASS);
+      throw new IllegalStateException("Caught exception while creating instance of field extractor configured with key: " + FIELD_EXTRACTOR_CLASS);
     }
   }
 
@@ -107,9 +106,8 @@ public class PinotOutputFormat<T> extends FileOutputFormat<NullWritable, T> {
       throws IOException {
     SegmentGeneratorConfig segmentGeneratorConfig = getSegmentGeneratorConfig(job);
     FieldExtractor<T> fieldExtractor = getFieldExtractor(job);
-    Set<String> fieldsToRead = IngestionUtils
-        .getFieldsForRecordExtractor(segmentGeneratorConfig.getTableConfig().getIngestionConfig(),
-            segmentGeneratorConfig.getSchema());
+    Set<String> fieldsToRead =
+        IngestionUtils.getFieldsForRecordExtractor(segmentGeneratorConfig.getTableConfig().getIngestionConfig(), segmentGeneratorConfig.getSchema());
     fieldExtractor.init(job.getConfiguration(), fieldsToRead);
     return new PinotRecordWriter<>(job, segmentGeneratorConfig, fieldExtractor);
   }
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotRecordWriter.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotRecordWriter.java
index 3c145ac..1dfabfe 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotRecordWriter.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/io/PinotRecordWriter.java
@@ -52,8 +52,7 @@ public class PinotRecordWriter<T> extends RecordWriter<NullWritable, T> {
   private final FileSystem _fileSystem;
   private final Path _outputDir;
 
-  public PinotRecordWriter(TaskAttemptContext job, SegmentGeneratorConfig segmentGeneratorConfig,
-      FieldExtractor<T> fieldExtractor)
+  public PinotRecordWriter(TaskAttemptContext job, SegmentGeneratorConfig segmentGeneratorConfig, FieldExtractor<T> fieldExtractor)
       throws IOException {
     _segmentGeneratorConfig = segmentGeneratorConfig;
     _fieldExtractor = fieldExtractor;
@@ -119,8 +118,6 @@ public class PinotRecordWriter<T> extends RecordWriter<NullWritable, T> {
     LOGGER.info("Copying segment tar file from local: {} to HDFS: {}", segmentTarFile, hdfsSegmentTarPath);
     _fileSystem.copyFromLocalFile(true, new Path(segmentTarFile.getPath()), hdfsSegmentTarPath);
 
-    LOGGER
-        .info("Finish creating segment: {} from data file: {} of sequence id: {} into HDFS: {}", segmentName, dataFile,
-            sequenceId, hdfsSegmentTarPath);
+    LOGGER.info("Finish creating segment: {} from data file: {} of sequence id: {} into HDFS: {}", segmentName, dataFile, sequenceId, hdfsSegmentTarPath);
   }
 }
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentCreationJob.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentCreationJob.java
index 474d9a4..d6f2181 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentCreationJob.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentCreationJob.java
@@ -78,8 +78,7 @@ public class HadoopSegmentCreationJob extends SegmentCreationJob {
       _logger.info("Creating segments with data files: {}", dataFilePaths);
       for (int i = 0; i < numDataFiles; i++) {
         Path dataFilePath = dataFilePaths.get(i);
-        try (DataOutputStream dataOutputStream = _outputDirFileSystem
-            .create(new Path(stagingInputDir, Integer.toString(i)))) {
+        try (DataOutputStream dataOutputStream = _outputDirFileSystem.create(new Path(stagingInputDir, Integer.toString(i)))) {
           dataOutputStream.write(StringUtil.encodeUtf8(dataFilePath.toString() + " " + i));
           dataOutputStream.flush();
         }
@@ -159,9 +158,7 @@ public class HadoopSegmentCreationJob extends SegmentCreationJob {
   protected void addDepsJarToDistributedCache(Job job)
       throws IOException {
     if (_depsJarDir != null) {
-      PinotHadoopJobPreparationHelper
-          .addDepsJarToDistributedCacheHelper(FileSystem.get(new Path(_depsJarDir).toUri(), getConf()), job,
-              new Path(_depsJarDir));
+      PinotHadoopJobPreparationHelper.addDepsJarToDistributedCacheHelper(FileSystem.get(new Path(_depsJarDir).toUri(), getConf()), job, new Path(_depsJarDir));
     }
   }
 
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentPreprocessingJob.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentPreprocessingJob.java
index 81a9902..3125b0d 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentPreprocessingJob.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/HadoopSegmentPreprocessingJob.java
@@ -96,11 +96,10 @@ public class HadoopSegmentPreprocessingJob extends SegmentPreprocessingJob {
     // Cleans up preprocessed output dir if exists
     cleanUpPreprocessedOutputs(_preprocessedOutputDir);
 
-    DataPreprocessingHelper dataPreprocessingHelper =
-        DataPreprocessingHelperFactory.generateDataPreprocessingHelper(_inputSegmentDir, _preprocessedOutputDir);
+    DataPreprocessingHelper dataPreprocessingHelper = DataPreprocessingHelperFactory.generateDataPreprocessingHelper(_inputSegmentDir, _preprocessedOutputDir);
     dataPreprocessingHelper
-        .registerConfigs(_tableConfig, _pinotTableSchema, _partitionColumn, _numPartitions, _partitionFunction,
-            _sortingColumn, _sortingColumnType, _numOutputFiles, _maxNumRecordsPerFile);
+        .registerConfigs(_tableConfig, _pinotTableSchema, _partitionColumn, _numPartitions, _partitionFunction, _sortingColumn, _sortingColumnType,
+            _numOutputFiles, _maxNumRecordsPerFile);
 
     Job job = dataPreprocessingHelper.setUpJob();
 
@@ -110,8 +109,7 @@ public class HadoopSegmentPreprocessingJob extends SegmentPreprocessingJob {
     LOGGER.info("HDFS class path: " + _pathToDependencyJar);
     if (_pathToDependencyJar != null) {
       LOGGER.info("Copying jars locally.");
-      PinotHadoopJobPreparationHelper
-          .addDepsJarToDistributedCacheHelper(HadoopUtils.DEFAULT_FILE_SYSTEM, job, _pathToDependencyJar);
+      PinotHadoopJobPreparationHelper.addDepsJarToDistributedCacheHelper(HadoopUtils.DEFAULT_FILE_SYSTEM, job, _pathToDependencyJar);
     } else {
       LOGGER.info("Property '{}' not specified.", JobConfigConstants.PATH_TO_DEPS_JAR);
     }
@@ -132,8 +130,7 @@ public class HadoopSegmentPreprocessingJob extends SegmentPreprocessingJob {
     if (customConfig != null) {
       Map<String, String> customConfigMap = customConfig.getCustomConfigs();
       if (customConfigMap != null && !customConfigMap.isEmpty()) {
-        String preprocessingOperationsString =
-            customConfigMap.getOrDefault(InternalConfigConstants.PREPROCESS_OPERATIONS, "");
+        String preprocessingOperationsString = customConfigMap.getOrDefault(InternalConfigConstants.PREPROCESS_OPERATIONS, "");
         DataPreprocessingUtils.getOperations(_preprocessingOperations, preprocessingOperationsString);
       }
     }
@@ -148,8 +145,7 @@ public class HadoopSegmentPreprocessingJob extends SegmentPreprocessingJob {
     SegmentPartitionConfig segmentPartitionConfig = _tableConfig.getIndexingConfig().getSegmentPartitionConfig();
     if (segmentPartitionConfig != null) {
       Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
-      Preconditions
-          .checkArgument(columnPartitionMap.size() <= 1, "There should be at most 1 partition setting in the table.");
+      Preconditions.checkArgument(columnPartitionMap.size() <= 1, "There should be at most 1 partition setting in the table.");
       if (columnPartitionMap.size() == 1) {
         _partitionColumn = columnPartitionMap.keySet().iterator().next();
         _numPartitions = segmentPartitionConfig.getNumPartitions(_partitionColumn);
@@ -190,12 +186,9 @@ public class HadoopSegmentPreprocessingJob extends SegmentPreprocessingJob {
         _sortingColumn = sortedColumns.get(0);
         FieldSpec fieldSpec = _pinotTableSchema.getFieldSpecFor(_sortingColumn);
         Preconditions.checkState(fieldSpec != null, "Failed to find sorting column: {} in the schema", _sortingColumn);
-        Preconditions
-            .checkState(fieldSpec.isSingleValueField(), "Cannot sort on multi-value column: %s", _sortingColumn);
+        Preconditions.checkState(fieldSpec.isSingleValueField(), "Cannot sort on multi-value column: %s", _sortingColumn);
         _sortingColumnType = fieldSpec.getDataType();
-        Preconditions
-            .checkState(_sortingColumnType.canBeASortedColumn(), "Cannot sort on %s column: %s", _sortingColumnType,
-                _sortingColumn);
+        Preconditions.checkState(_sortingColumnType.canBeASortedColumn(), "Cannot sort on %s column: %s", _sortingColumnType, _sortingColumn);
         LOGGER.info("Sorting the data with column: {} of type: {}", _sortingColumn, _sortingColumnType);
       }
     }
@@ -214,9 +207,8 @@ public class HadoopSegmentPreprocessingJob extends SegmentPreprocessingJob {
     Map<String, String> customConfigsMap = tableCustomConfig.getCustomConfigs();
     if (customConfigsMap != null && customConfigsMap.containsKey(InternalConfigConstants.PREPROCESSING_NUM_REDUCERS)) {
       _numOutputFiles = Integer.parseInt(customConfigsMap.get(InternalConfigConstants.PREPROCESSING_NUM_REDUCERS));
-      Preconditions.checkState(_numOutputFiles > 0, String
-          .format("The value of %s should be positive! Current value: %s",
-              InternalConfigConstants.PREPROCESSING_NUM_REDUCERS, _numOutputFiles));
+      Preconditions.checkState(_numOutputFiles > 0,
+          String.format("The value of %s should be positive! Current value: %s", InternalConfigConstants.PREPROCESSING_NUM_REDUCERS, _numOutputFiles));
     } else {
       _numOutputFiles = 0;
     }
@@ -224,21 +216,18 @@ public class HadoopSegmentPreprocessingJob extends SegmentPreprocessingJob {
     if (customConfigsMap != null) {
       int maxNumRecords;
       if (customConfigsMap.containsKey(InternalConfigConstants.PARTITION_MAX_RECORDS_PER_FILE)) {
-        LOGGER.warn("The config: {} from custom config is deprecated. Use {} instead.",
-            InternalConfigConstants.PARTITION_MAX_RECORDS_PER_FILE,
+        LOGGER.warn("The config: {} from custom config is deprecated. Use {} instead.", InternalConfigConstants.PARTITION_MAX_RECORDS_PER_FILE,
             InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE);
         maxNumRecords = Integer.parseInt(customConfigsMap.get(InternalConfigConstants.PARTITION_MAX_RECORDS_PER_FILE));
       } else if (customConfigsMap.containsKey(InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE)) {
-        maxNumRecords =
-            Integer.parseInt(customConfigsMap.get(InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE));
+        maxNumRecords = Integer.parseInt(customConfigsMap.get(InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE));
       } else {
         return;
       }
       // TODO: add a in-built maximum value for this config to avoid having too many small files.
       // E.g. if the config is set to 1 which is smaller than this in-built value, the job should be abort from generating too many small files.
       Preconditions.checkArgument(maxNumRecords > 0,
-          "The value of " + InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE
-              + " should be positive. Current value: " + maxNumRecords);
+          "The value of " + InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE + " should be positive. Current value: " + maxNumRecords);
       LOGGER.info("Setting {} to {}", InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE, maxNumRecords);
       _maxNumRecordsPerFile = maxNumRecords;
     }
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/InternalConfigConstants.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/InternalConfigConstants.java
index 3701db2..ef898e3 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/InternalConfigConstants.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/InternalConfigConstants.java
@@ -23,6 +23,9 @@ package org.apache.pinot.hadoop.job;
  * jobs. They are not meant to be set externally.
  */
 public class InternalConfigConstants {
+  private InternalConfigConstants() {
+  }
+
   public static final String TIME_COLUMN_CONFIG = "time.column";
   public static final String TIME_COLUMN_VALUE = "time.column.value";
   public static final String IS_APPEND = "is.append";
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/AvroDataPreprocessingMapper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/AvroDataPreprocessingMapper.java
index 6278e8e..05b2a27 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/AvroDataPreprocessingMapper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/AvroDataPreprocessingMapper.java
@@ -50,8 +50,7 @@ public class AvroDataPreprocessingMapper extends Mapper<AvroKey<GenericRecord>,
     if (sortingColumnConfig != null) {
       _sortingColumn = sortingColumnConfig;
       _sortingColumnType = FieldSpec.DataType.valueOf(configuration.get(InternalConfigConstants.SORTING_COLUMN_TYPE));
-      LOGGER.info("Initialized AvroDataPreprocessingMapper with sortingColumn: {} of type: {}", _sortingColumn,
-          _sortingColumnType);
+      LOGGER.info("Initialized AvroDataPreprocessingMapper with sortingColumn: {} of type: {}", _sortingColumn, _sortingColumnType);
     } else {
       LOGGER.info("Initialized AvroDataPreprocessingMapper without sorting column");
     }
@@ -63,19 +62,14 @@ public class AvroDataPreprocessingMapper extends Mapper<AvroKey<GenericRecord>,
     GenericRecord record = key.datum();
     if (_sortingColumn != null) {
       Object object = record.get(_sortingColumn);
-      Preconditions
-          .checkState(object != null, "Failed to find value for sorting column: %s in record: %s", _sortingColumn,
-              record);
+      Preconditions.checkState(object != null, "Failed to find value for sorting column: %s in record: %s", _sortingColumn, record);
       Object convertedValue = _avroRecordExtractor.convert(object);
-      Preconditions.checkState(convertedValue != null, "Invalid value: %s for sorting column: %s in record: %s", object,
-          _sortingColumn, record);
+      Preconditions.checkState(convertedValue != null, "Invalid value: %s for sorting column: %s in record: %s", object, _sortingColumn, record);
       WritableComparable outputKey;
       try {
         outputKey = DataPreprocessingUtils.convertToWritableComparable(convertedValue, _sortingColumnType);
       } catch (Exception e) {
-        throw new IllegalStateException(
-            String.format("Caught exception while processing sorting column: %s in record: %s", _sortingColumn, record),
-            e);
+        throw new IllegalStateException(String.format("Caught exception while processing sorting column: %s in record: %s", _sortingColumn, record), e);
       }
       context.write(outputKey, new AvroValue<>(record));
     } else {
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/OrcDataPreprocessingMapper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/OrcDataPreprocessingMapper.java
index d7d0694..6c0e25d 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/OrcDataPreprocessingMapper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/OrcDataPreprocessingMapper.java
@@ -50,8 +50,7 @@ public class OrcDataPreprocessingMapper extends Mapper<NullWritable, OrcStruct,
     if (sortingColumnConfig != null) {
       _sortingColumn = sortingColumnConfig;
       _sortingColumnType = FieldSpec.DataType.valueOf(configuration.get(InternalConfigConstants.SORTING_COLUMN_TYPE));
-      LOGGER.info("Initialized OrcDataPreprocessingMapper with sortingColumn: {} of type: {}", _sortingColumn,
-          _sortingColumnType);
+      LOGGER.info("Initialized OrcDataPreprocessingMapper with sortingColumn: {} of type: {}", _sortingColumn, _sortingColumnType);
     } else {
       LOGGER.info("Initialized OrcDataPreprocessingMapper without sorting column");
     }
@@ -65,19 +64,16 @@ public class OrcDataPreprocessingMapper extends Mapper<NullWritable, OrcStruct,
       if (_sortingColumnId == -1) {
         List<String> fieldNames = value.getSchema().getFieldNames();
         _sortingColumnId = fieldNames.indexOf(_sortingColumn);
-        Preconditions.checkState(_sortingColumnId != -1, "Failed to find sorting column: %s in the ORC fields: %s",
-            _sortingColumn, fieldNames);
+        Preconditions.checkState(_sortingColumnId != -1, "Failed to find sorting column: %s in the ORC fields: %s", _sortingColumn, fieldNames);
         LOGGER.info("Field id for sorting column: {} is: {}", _sortingColumn, _sortingColumnId);
       }
       WritableComparable sortingColumnValue = value.getFieldValue(_sortingColumnId);
       WritableComparable outputKey;
       try {
-        outputKey = DataPreprocessingUtils
-            .convertToWritableComparable(OrcUtils.convert(sortingColumnValue), _sortingColumnType);
+        outputKey = DataPreprocessingUtils.convertToWritableComparable(OrcUtils.convert(sortingColumnValue), _sortingColumnType);
       } catch (Exception e) {
-        throw new IllegalStateException(String
-            .format("Caught exception while processing sorting column: %s, id: %d in ORC struct: %s", _sortingColumn,
-                _sortingColumnId, value), e);
+        throw new IllegalStateException(
+            String.format("Caught exception while processing sorting column: %s, id: %d in ORC struct: %s", _sortingColumn, _sortingColumnId, value), e);
       }
       context.write(outputKey, _valueWrapper);
     } else {
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentCreationMapper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentCreationMapper.java
index 6927d5d..2919219 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentCreationMapper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentCreationMapper.java
@@ -140,16 +140,13 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
     setFlagForSchemaMismatch();
 
     // Set up segment name generator
-    String segmentNameGeneratorType =
-        _jobConf.get(JobConfigConstants.SEGMENT_NAME_GENERATOR_TYPE, JobConfigConstants.DEFAULT_SEGMENT_NAME_GENERATOR);
+    String segmentNameGeneratorType = _jobConf.get(JobConfigConstants.SEGMENT_NAME_GENERATOR_TYPE, JobConfigConstants.DEFAULT_SEGMENT_NAME_GENERATOR);
     switch (segmentNameGeneratorType) {
       case JobConfigConstants.SIMPLE_SEGMENT_NAME_GENERATOR:
-        _segmentNameGenerator =
-            new SimpleSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_POSTFIX));
+        _segmentNameGenerator = new SimpleSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_POSTFIX));
         break;
       case JobConfigConstants.NORMALIZED_DATE_SEGMENT_NAME_GENERATOR:
-        Preconditions.checkState(_tableConfig != null,
-            "In order to use NormalizedDateSegmentNameGenerator, table config must be provided");
+        Preconditions.checkState(_tableConfig != null, "In order to use NormalizedDateSegmentNameGenerator, table config must be provided");
         SegmentsValidationAndRetentionConfig validationConfig = _tableConfig.getValidationConfig();
         DateTimeFormatSpec dateTimeFormatSpec = null;
         String timeColumnName = _tableConfig.getValidationConfig().getTimeColumnName();
@@ -160,11 +157,9 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
             dateTimeFormatSpec = new DateTimeFormatSpec(dateTimeFieldSpec.getFormat());
           }
         }
-        _segmentNameGenerator =
-            new NormalizedDateSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_PREFIX),
-                _jobConf.getBoolean(JobConfigConstants.EXCLUDE_SEQUENCE_ID, false),
-                IngestionConfigUtils.getBatchSegmentIngestionType(_tableConfig),
-                IngestionConfigUtils.getBatchSegmentIngestionFrequency(_tableConfig), dateTimeFormatSpec);
+        _segmentNameGenerator = new NormalizedDateSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_PREFIX),
+            _jobConf.getBoolean(JobConfigConstants.EXCLUDE_SEQUENCE_ID, false), IngestionConfigUtils.getBatchSegmentIngestionType(_tableConfig),
+            IngestionConfigUtils.getBatchSegmentIngestionFrequency(_tableConfig), dateTimeFormatSpec);
         break;
       default:
         throw new UnsupportedOperationException("Unsupported segment name generator type: " + segmentNameGeneratorType);
@@ -181,8 +176,7 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
       _logger.warn("Deleting existing file: {}", _localStagingDir);
       FileUtils.forceDelete(_localStagingDir);
     }
-    _logger
-        .info("Making local temporary directories: {}, {}, {}", _localStagingDir, _localInputDir, _localSegmentTarDir);
+    _logger.info("Making local temporary directories: {}, {}, {}", _localStagingDir, _localInputDir, _localSegmentTarDir);
     Preconditions.checkState(_localStagingDir.mkdirs());
     Preconditions.checkState(_localInputDir.mkdir());
     Preconditions.checkState(_localSegmentDir.mkdir());
@@ -237,8 +231,7 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
     String inputFileName = hdfsInputFile.getName();
     File localInputFile = new File(_localInputDir, inputFileName);
     _logger.info("Copying input file from: {} to: {}", hdfsInputFile, localInputFile);
-    FileSystem.get(hdfsInputFile.toUri(), _jobConf)
-        .copyToLocalFile(hdfsInputFile, new Path(localInputFile.getAbsolutePath()));
+    FileSystem.get(hdfsInputFile.toUri(), _jobConf).copyToLocalFile(hdfsInputFile, new Path(localInputFile.getAbsolutePath()));
 
     SegmentGeneratorConfig segmentGeneratorConfig = new SegmentGeneratorConfig(_tableConfig, _schema);
     segmentGeneratorConfig.setTableName(_rawTableName);
@@ -273,8 +266,7 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
       validateSchema(driver.getIngestionSchemaValidator());
       driver.build();
     } catch (Exception e) {
-      _logger.error("Caught exception while creating segment with HDFS input file: {}, sequence id: {}", hdfsInputFile,
-          sequenceId, e);
+      _logger.error("Caught exception while creating segment with HDFS input file: {}, sequence id: {}", hdfsInputFile, sequenceId, e);
       throw new RuntimeException(e);
     } finally {
       progressReporterThread.interrupt();
@@ -294,23 +286,20 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
 
     long uncompressedSegmentSize = FileUtils.sizeOf(localSegmentDir);
     long compressedSegmentSize = FileUtils.sizeOf(localSegmentTarFile);
-    _logger.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName,
-        DataSizeUtils.fromBytes(uncompressedSegmentSize), DataSizeUtils.fromBytes(compressedSegmentSize));
+    _logger.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName, DataSizeUtils.fromBytes(uncompressedSegmentSize),
+        DataSizeUtils.fromBytes(compressedSegmentSize));
 
     Path hdfsSegmentTarFile = new Path(_hdfsSegmentTarDir, segmentTarFileName);
     if (_useRelativePath) {
       Path relativeOutputPath =
-          getRelativeOutputPath(new Path(_jobConf.get(JobConfigConstants.PATH_TO_INPUT)).toUri(), hdfsInputFile.toUri(),
-              _hdfsSegmentTarDir);
+          getRelativeOutputPath(new Path(_jobConf.get(JobConfigConstants.PATH_TO_INPUT)).toUri(), hdfsInputFile.toUri(), _hdfsSegmentTarDir);
       hdfsSegmentTarFile = new Path(relativeOutputPath, segmentTarFileName);
     }
     _logger.info("Copying segment tar file from: {} to: {}", localSegmentTarFile, hdfsSegmentTarFile);
-    FileSystem.get(hdfsSegmentTarFile.toUri(), _jobConf)
-        .copyFromLocalFile(true, true, new Path(localSegmentTarFile.getAbsolutePath()), hdfsSegmentTarFile);
+    FileSystem.get(hdfsSegmentTarFile.toUri(), _jobConf).copyFromLocalFile(true, true, new Path(localSegmentTarFile.getAbsolutePath()), hdfsSegmentTarFile);
 
     context.write(new LongWritable(sequenceId), new Text(segmentTarFileName));
-    _logger.info("Finish generating segment: {} with HDFS input file: {}, sequence id: {}", segmentName, hdfsInputFile,
-        sequenceId);
+    _logger.info("Finish generating segment: {} with HDFS input file: {}, sequence id: {}", segmentName, hdfsInputFile, sequenceId);
   }
 
   protected FileFormat getFileFormat(String fileName) {
@@ -342,8 +331,7 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
       }
       if (fileFormat == FileFormat.THRIFT) {
         try (InputStream inputStream = FileSystem.get(_readerConfigFile.toUri(), _jobConf).open(_readerConfigFile)) {
-          ThriftRecordReaderConfig readerConfig =
-              JsonUtils.inputStreamToObject(inputStream, ThriftRecordReaderConfig.class);
+          ThriftRecordReaderConfig readerConfig = JsonUtils.inputStreamToObject(inputStream, ThriftRecordReaderConfig.class);
           _logger.info("Using Thrift record reader config: {}", readerConfig);
           return readerConfig;
         }
@@ -351,8 +339,7 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
 
       if (fileFormat == FileFormat.PROTO) {
         try (InputStream inputStream = FileSystem.get(_readerConfigFile.toUri(), _jobConf).open(_readerConfigFile)) {
-          ProtoBufRecordReaderConfig readerConfig =
-              JsonUtils.inputStreamToObject(inputStream, ProtoBufRecordReaderConfig.class);
+          ProtoBufRecordReaderConfig readerConfig = JsonUtils.inputStreamToObject(inputStream, ProtoBufRecordReaderConfig.class);
           _logger.info("Using Protocol Buffer record reader config: {}", readerConfig);
           return readerConfig;
         }
@@ -369,8 +356,7 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
    * Can be overridden to set additional segment generator configs.
    */
   @SuppressWarnings("unused")
-  protected void addAdditionalSegmentGeneratorConfigs(SegmentGeneratorConfig segmentGeneratorConfig, Path hdfsInputFile,
-      int sequenceId) {
+  protected void addAdditionalSegmentGeneratorConfigs(SegmentGeneratorConfig segmentGeneratorConfig, Path hdfsInputFile, int sequenceId) {
   }
 
   private void setFlagForSchemaMismatch() {
@@ -415,17 +401,14 @@ public class SegmentCreationMapper extends Mapper<LongWritable, Text, LongWritab
   }
 
   private boolean isSchemaMismatch() {
-    return _dataTypeMismatch + _singleValueMultiValueFieldMismatch + _multiValueStructureMismatch + _missingPinotColumn
-        != 0;
+    return _dataTypeMismatch + _singleValueMultiValueFieldMismatch + _multiValueStructureMismatch + _missingPinotColumn != 0;
   }
 
   @Override
   public void cleanup(Context context) {
     context.getCounter(SegmentCreationJob.SchemaMisMatchCounter.DATA_TYPE_MISMATCH).increment(_dataTypeMismatch);
-    context.getCounter(SegmentCreationJob.SchemaMisMatchCounter.SINGLE_VALUE_MULTI_VALUE_FIELD_MISMATCH)
-        .increment(_singleValueMultiValueFieldMismatch);
-    context.getCounter(SegmentCreationJob.SchemaMisMatchCounter.MULTI_VALUE_FIELD_STRUCTURE_MISMATCH)
-        .increment(_multiValueStructureMismatch);
+    context.getCounter(SegmentCreationJob.SchemaMisMatchCounter.SINGLE_VALUE_MULTI_VALUE_FIELD_MISMATCH).increment(_singleValueMultiValueFieldMismatch);
+    context.getCounter(SegmentCreationJob.SchemaMisMatchCounter.MULTI_VALUE_FIELD_STRUCTURE_MISMATCH).increment(_multiValueStructureMismatch);
     context.getCounter(SegmentCreationJob.SchemaMisMatchCounter.MISSING_PINOT_COLUMN).increment(_missingPinotColumn);
     _logger.info("Deleting local temporary directory: {}", _localStagingDir);
     FileUtils.deleteQuietly(_localStagingDir);
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentPreprocessingMapper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentPreprocessingMapper.java
index 3d3fcec..2b7a5c5 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentPreprocessingMapper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/mappers/SegmentPreprocessingMapper.java
@@ -73,11 +73,9 @@ public class SegmentPreprocessingMapper extends Mapper<AvroKey<GenericRecord>, N
       if (timeFormat.equals(DateTimeFieldSpec.TimeFormat.EPOCH.toString())) {
         dateTimeFormatSpec = new DateTimeFormatSpec(1, timeType, timeFormat);
       } else {
-        dateTimeFormatSpec = new DateTimeFormatSpec(1, timeType, timeFormat,
-            _jobConf.get(InternalConfigConstants.SEGMENT_TIME_SDF_PATTERN));
+        dateTimeFormatSpec = new DateTimeFormatSpec(1, timeType, timeFormat, _jobConf.get(InternalConfigConstants.SEGMENT_TIME_SDF_PATTERN));
       }
-      _normalizedDateSegmentNameGenerator =
-          new NormalizedDateSegmentNameGenerator(tableName, null, false, "APPEND", pushFrequency, dateTimeFormatSpec);
+      _normalizedDateSegmentNameGenerator = new NormalizedDateSegmentNameGenerator(tableName, null, false, "APPEND", pushFrequency, dateTimeFormatSpec);
       _sampleNormalizedTimeColumnValue = _normalizedDateSegmentNameGenerator.getNormalizedDate(timeColumnValue);
     }
 
@@ -105,8 +103,7 @@ public class SegmentPreprocessingMapper extends Mapper<AvroKey<GenericRecord>, N
         _firstInstanceOfMismatchedTime = false;
         // TODO: Create a custom exception and gracefully catch this exception outside, changing what the path to input
         // into segment creation should be
-        LOGGER.warn("This segment contains multiple time units. Sample is {}, current is {}",
-            _sampleNormalizedTimeColumnValue, normalizedTimeColumnValue);
+        LOGGER.warn("This segment contains multiple time units. Sample is {}, current is {}", _sampleNormalizedTimeColumnValue, normalizedTimeColumnValue);
       }
     }
 
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/AvroDataPreprocessingPartitioner.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/AvroDataPreprocessingPartitioner.java
index 74799c7..d817e67 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/AvroDataPreprocessingPartitioner.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/AvroDataPreprocessingPartitioner.java
@@ -48,9 +48,8 @@ public class AvroDataPreprocessingPartitioner extends Partitioner<WritableCompar
     String partitionFunctionName = conf.get(InternalConfigConstants.PARTITION_FUNCTION_CONFIG);
     int numPartitions = Integer.parseInt(conf.get(InternalConfigConstants.NUM_PARTITIONS_CONFIG));
     _partitionFunction = PartitionFunctionFactory.getPartitionFunction(partitionFunctionName, numPartitions);
-    LOGGER.info(
-        "Initialized AvroDataPreprocessingPartitioner with partitionColumn: {}, partitionFunction: {}, numPartitions: {}",
-        _partitionColumn, partitionFunctionName, numPartitions);
+    LOGGER.info("Initialized AvroDataPreprocessingPartitioner with partitionColumn: {}, partitionFunction: {}, numPartitions: {}", _partitionColumn,
+        partitionFunctionName, numPartitions);
   }
 
   @Override
@@ -62,15 +61,11 @@ public class AvroDataPreprocessingPartitioner extends Partitioner<WritableCompar
   public int getPartition(WritableComparable key, AvroValue<GenericRecord> value, int numPartitions) {
     GenericRecord record = value.datum();
     Object object = record.get(_partitionColumn);
-    Preconditions
-        .checkState(object != null, "Failed to find value for partition column: %s in record: %s", _partitionColumn,
-            record);
+    Preconditions.checkState(object != null, "Failed to find value for partition column: %s in record: %s", _partitionColumn, record);
     Object convertedValue = _avroRecordExtractor.convert(object);
-    Preconditions.checkState(convertedValue != null, "Invalid value: %s for partition column: %s in record: %s", object,
-        _partitionColumn, record);
+    Preconditions.checkState(convertedValue != null, "Invalid value: %s for partition column: %s in record: %s", object, _partitionColumn, record);
     Preconditions.checkState(convertedValue instanceof Number || convertedValue instanceof String,
-        "Value for partition column: %s must be either a Number or a String, found: %s in record: %s", _partitionColumn,
-        convertedValue.getClass(), record);
+        "Value for partition column: %s must be either a Number or a String, found: %s in record: %s", _partitionColumn, convertedValue.getClass(), record);
     // NOTE: Always partition with String type value because Broker uses String type value to prune segments
     return _partitionFunction.getPartition(convertedValue.toString());
   }
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/GenericPartitioner.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/GenericPartitioner.java
index 7ca22cf..66a37ce 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/GenericPartitioner.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/GenericPartitioner.java
@@ -45,8 +45,7 @@ public class GenericPartitioner<T> extends Partitioner<T, AvroValue<GenericRecor
     _configuration = conf;
     _partitionColumn = _configuration.get(PARTITION_COLUMN_CONFIG);
     _numPartitions = Integer.parseInt(_configuration.get(NUM_PARTITIONS_CONFIG));
-    _partitionFunction = PartitionFunctionFactory
-        .getPartitionFunction(_configuration.get(PARTITION_FUNCTION_CONFIG, null), _numPartitions);
+    _partitionFunction = PartitionFunctionFactory.getPartitionFunction(_configuration.get(PARTITION_FUNCTION_CONFIG, null), _numPartitions);
 
     LOGGER.info("The partition function is: " + _partitionFunction.getClass().getName());
     LOGGER.info("The partition column is: " + _partitionColumn);
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/OrcDataPreprocessingPartitioner.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/OrcDataPreprocessingPartitioner.java
index bef2cef..4faac24 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/OrcDataPreprocessingPartitioner.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/partitioners/OrcDataPreprocessingPartitioner.java
@@ -48,9 +48,8 @@ public class OrcDataPreprocessingPartitioner extends Partitioner<WritableCompara
     String partitionFunctionName = conf.get(InternalConfigConstants.PARTITION_FUNCTION_CONFIG);
     int numPartitions = Integer.parseInt(conf.get(InternalConfigConstants.NUM_PARTITIONS_CONFIG));
     _partitionFunction = PartitionFunctionFactory.getPartitionFunction(partitionFunctionName, numPartitions);
-    LOGGER.info(
-        "Initialized OrcDataPreprocessingPartitioner with partitionColumn: {}, partitionFunction: {}, numPartitions: {}",
-        _partitionColumn, partitionFunctionName, numPartitions);
+    LOGGER.info("Initialized OrcDataPreprocessingPartitioner with partitionColumn: {}, partitionFunction: {}, numPartitions: {}", _partitionColumn,
+        partitionFunctionName, numPartitions);
   }
 
   @Override
@@ -64,8 +63,7 @@ public class OrcDataPreprocessingPartitioner extends Partitioner<WritableCompara
     if (_partitionColumnId == -1) {
       List<String> fieldNames = orcStruct.getSchema().getFieldNames();
       _partitionColumnId = fieldNames.indexOf(_partitionColumn);
-      Preconditions.checkState(_partitionColumnId != -1, "Failed to find partition column: %s in the ORC fields: %s",
-          _partitionColumn, fieldNames);
+      Preconditions.checkState(_partitionColumnId != -1, "Failed to find partition column: %s in the ORC fields: %s", _partitionColumn, fieldNames);
       LOGGER.info("Field id for partition column: {} is: {}", _partitionColumn, _partitionColumnId);
     }
     WritableComparable partitionColumnValue = orcStruct.getFieldValue(_partitionColumnId);
@@ -73,9 +71,9 @@ public class OrcDataPreprocessingPartitioner extends Partitioner<WritableCompara
     try {
       convertedValue = OrcUtils.convert(partitionColumnValue);
     } catch (Exception e) {
-      throw new IllegalStateException(String
-          .format("Caught exception while processing partition column: %s, id: %d in ORC struct: %s", _partitionColumn,
-              _partitionColumnId, orcStruct), e);
+      throw new IllegalStateException(
+          String.format("Caught exception while processing partition column: %s, id: %d in ORC struct: %s", _partitionColumn, _partitionColumnId, orcStruct),
+          e);
     }
     // NOTE: Always partition with String type value because Broker uses String type value to prune segments
     return _partitionFunction.getPartition(convertedValue.toString());
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/AvroDataPreprocessingHelper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/AvroDataPreprocessingHelper.java
index 9e5f5f2..5b6a2f0 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/AvroDataPreprocessingHelper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/AvroDataPreprocessingHelper.java
@@ -136,14 +136,12 @@ public class AvroDataPreprocessingHelper extends DataPreprocessingHelper {
     if (_partitionColumn != null) {
       Preconditions.checkArgument(schema.getField(_partitionColumn) != null,
           String.format("Partition column: %s is not found from the schema of input files.", _partitionColumn));
-      Preconditions.checkArgument(_numPartitions > 0,
-          String.format("Number of partitions should be positive. Current value: %s", _numPartitions));
+      Preconditions.checkArgument(_numPartitions > 0, String.format("Number of partitions should be positive. Current value: %s", _numPartitions));
       Preconditions.checkArgument(_partitionFunction != null, "Partition function should not be null!");
       try {
         PartitionFunctionFactory.PartitionFunctionType.fromString(_partitionFunction);
       } catch (IllegalArgumentException e) {
-        LOGGER.error("Partition function needs to be one of Modulo, Murmur, ByteArray, HashCode, it is currently {}",
-            _partitionColumn);
+        LOGGER.error("Partition function needs to be one of Modulo, Murmur, ByteArray, HashCode, it is currently {}", _partitionColumn);
         throw new IllegalArgumentException(e);
       }
     }
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelper.java
index a505d09..4a80b7c 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelper.java
@@ -78,9 +78,8 @@ public abstract class DataPreprocessingHelper {
     _outputPath = outputPath;
   }
 
-  public void registerConfigs(TableConfig tableConfig, Schema tableSchema, String partitionColumn, int numPartitions,
-      String partitionFunction, String sortingColumn, FieldSpec.DataType sortingColumnType, int numOutputFiles,
-      int maxNumRecordsPerFile) {
+  public void registerConfigs(TableConfig tableConfig, Schema tableSchema, String partitionColumn, int numPartitions, String partitionFunction,
+      String sortingColumn, FieldSpec.DataType sortingColumnType, int numOutputFiles, int maxNumRecordsPerFile) {
     _tableConfig = tableConfig;
     _pinotTableSchema = tableSchema;
     _partitionColumn = partitionColumn;
@@ -159,8 +158,7 @@ public abstract class DataPreprocessingHelper {
       }
     }
     // Maximum number of records per output file
-    jobConf
-        .set(InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE, Integer.toString(_maxNumRecordsPerFile));
+    jobConf.set(InternalConfigConstants.PREPROCESSING_MAX_NUM_RECORDS_PER_FILE, Integer.toString(_maxNumRecordsPerFile));
     // Number of reducers
     LOGGER.info("Number of reduce tasks for pre-processing job: {}", numReduceTasks);
     job.setNumReduceTasks(numReduceTasks);
@@ -195,13 +193,11 @@ public abstract class DataPreprocessingHelper {
         if (dateTimeFieldSpec != null) {
           DateTimeFormatSpec formatSpec = new DateTimeFormatSpec(dateTimeFieldSpec.getFormat());
           job.getConfiguration().set(InternalConfigConstants.SEGMENT_TIME_TYPE, formatSpec.getColumnUnit().toString());
-          job.getConfiguration()
-              .set(InternalConfigConstants.SEGMENT_TIME_FORMAT, formatSpec.getTimeFormat().toString());
+          job.getConfiguration().set(InternalConfigConstants.SEGMENT_TIME_FORMAT, formatSpec.getTimeFormat().toString());
           job.getConfiguration().set(InternalConfigConstants.SEGMENT_TIME_SDF_PATTERN, formatSpec.getSDFPattern());
         }
       }
-      job.getConfiguration().set(InternalConfigConstants.SEGMENT_PUSH_FREQUENCY,
-          IngestionConfigUtils.getBatchSegmentIngestionFrequency(_tableConfig));
+      job.getConfiguration().set(InternalConfigConstants.SEGMENT_PUSH_FREQUENCY, IngestionConfigUtils.getBatchSegmentIngestionFrequency(_tableConfig));
 
       String sampleTimeColumnValue = getSampleTimeColumnValue(timeColumnName);
       if (sampleTimeColumnValue != null) {
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelperFactory.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelperFactory.java
index 2e91773..c612dac 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelperFactory.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/DataPreprocessingHelperFactory.java
@@ -28,6 +28,9 @@ import org.slf4j.LoggerFactory;
 
 
 public class DataPreprocessingHelperFactory {
+  private DataPreprocessingHelperFactory() {
+  }
+
   private static final Logger LOGGER = LoggerFactory.getLogger(DataPreprocessingHelperFactory.class);
 
   public static DataPreprocessingHelper generateDataPreprocessingHelper(Path inputPaths, Path outputPath)
@@ -37,12 +40,10 @@ public class DataPreprocessingHelperFactory {
 
     int numAvroFiles = avroFiles.size();
     int numOrcFiles = orcFiles.size();
-    Preconditions.checkState(numAvroFiles == 0 || numOrcFiles == 0,
-        "Cannot preprocess mixed AVRO files: %s and ORC files: %s in directories: %s", avroFiles, orcFiles,
-        inputPaths);
     Preconditions
-        .checkState(numAvroFiles > 0 || numOrcFiles > 0, "Failed to find any AVRO or ORC file in directories: %s",
+        .checkState(numAvroFiles == 0 || numOrcFiles == 0, "Cannot preprocess mixed AVRO files: %s and ORC files: %s in directories: %s", avroFiles, orcFiles,
             inputPaths);
+    Preconditions.checkState(numAvroFiles > 0 || numOrcFiles > 0, "Failed to find any AVRO or ORC file in directories: %s", inputPaths);
 
     if (numAvroFiles > 0) {
       LOGGER.info("Found AVRO files: {} in directories: {}", avroFiles, inputPaths);
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/OrcDataPreprocessingHelper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/OrcDataPreprocessingHelper.java
index aec0bb0..f6398cd 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/OrcDataPreprocessingHelper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/job/preprocess/OrcDataPreprocessingHelper.java
@@ -89,8 +89,7 @@ public class OrcDataPreprocessingHelper extends DataPreprocessingHelper {
   @Override
   String getSampleTimeColumnValue(String timeColumnName)
       throws IOException {
-    try (Reader reader = OrcFile
-        .createReader(_sampleRawDataPath, OrcFile.readerOptions(HadoopUtils.DEFAULT_CONFIGURATION))) {
+    try (Reader reader = OrcFile.createReader(_sampleRawDataPath, OrcFile.readerOptions(HadoopUtils.DEFAULT_CONFIGURATION))) {
       Reader.Options options = new Reader.Options();
       options.range(0, 1);
       RecordReader records = reader.rows(options);
@@ -212,20 +211,18 @@ public class OrcDataPreprocessingHelper extends DataPreprocessingHelper {
     if (_partitionColumn != null) {
       Preconditions.checkArgument(fieldNames.contains(_partitionColumn),
           String.format("Partition column: %s is not found from the schema of input files.", _partitionColumn));
-      Preconditions.checkArgument(_numPartitions > 0,
-          String.format("Number of partitions should be positive. Current value: %s", _numPartitions));
+      Preconditions.checkArgument(_numPartitions > 0, String.format("Number of partitions should be positive. Current value: %s", _numPartitions));
       Preconditions.checkArgument(_partitionFunction != null, "Partition function should not be null!");
       try {
         PartitionFunctionFactory.PartitionFunctionType.fromString(_partitionFunction);
       } catch (IllegalArgumentException e) {
-        LOGGER.error("Partition function needs to be one of Modulo, Murmur, ByteArray, HashCode, it is currently {}",
-            _partitionColumn);
+        LOGGER.error("Partition function needs to be one of Modulo, Murmur, ByteArray, HashCode, it is currently {}", _partitionColumn);
         throw new IllegalArgumentException(e);
       }
     }
     if (_sortingColumn != null) {
-      Preconditions.checkArgument(fieldNames.contains(_sortingColumn),
-          String.format("Sorted column: %s is not found from the schema of input files.", _sortingColumn));
+      Preconditions
+          .checkArgument(fieldNames.contains(_sortingColumn), String.format("Sorted column: %s is not found from the schema of input files.", _sortingColumn));
     }
   }
 }
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/PinotHadoopJobPreparationHelper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/PinotHadoopJobPreparationHelper.java
index 7103cfd..b8a4736 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/PinotHadoopJobPreparationHelper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/PinotHadoopJobPreparationHelper.java
@@ -28,6 +28,9 @@ import org.slf4j.LoggerFactory;
 
 
 public class PinotHadoopJobPreparationHelper {
+  private PinotHadoopJobPreparationHelper() {
+  }
+
   private static final Logger _logger = LoggerFactory.getLogger(PinotHadoopJobPreparationHelper.class);
 
   public static void addDepsJarToDistributedCacheHelper(FileSystem fileSystem, Job job, Path depsJarDir)
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/DataPreprocessingUtils.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/DataPreprocessingUtils.java
index 4cc6f75..d9bd25a 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/DataPreprocessingUtils.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/DataPreprocessingUtils.java
@@ -70,8 +70,7 @@ public class DataPreprocessingUtils {
           throw new IllegalArgumentException("Unsupported data type: " + dataType);
       }
     } else {
-      throw new IllegalArgumentException(
-          String.format("Value: %s must be either a Number or a String, found: %s", value, value.getClass()));
+      throw new IllegalArgumentException(String.format("Value: %s must be either a Number or a String, found: %s", value, value.getClass()));
     }
   }
 
@@ -86,9 +85,7 @@ public class DataPreprocessingUtils {
   }
 
   public enum Operation {
-    PARTITION,
-    SORT,
-    RESIZE;
+    PARTITION, SORT, RESIZE;
 
     public static Operation getOperation(String operationString) {
       for (Operation operation : Operation.values()) {
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/OrcUtils.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/OrcUtils.java
index dcfc3b5..6164a08 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/OrcUtils.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/main/java/org/apache/pinot/hadoop/utils/preprocess/OrcUtils.java
@@ -82,7 +82,6 @@ public class OrcUtils {
     if (orcValue instanceof OrcTimestamp) {
       return ((OrcTimestamp) orcValue).getTime();
     }
-    throw new IllegalArgumentException(
-        String.format("Illegal ORC value: %s, class: %s", orcValue, orcValue.getClass()));
+    throw new IllegalArgumentException(String.format("Illegal ORC value: %s, class: %s", orcValue, orcValue.getClass()));
   }
 }
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/data/IngestionSchemaValidatorTest.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/data/IngestionSchemaValidatorTest.java
index 7f350ef..20a738f 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/data/IngestionSchemaValidatorTest.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/data/IngestionSchemaValidatorTest.java
@@ -33,19 +33,17 @@ public class IngestionSchemaValidatorTest {
   @Test
   public void testAvroIngestionSchemaValidatorForSingleValueColumns()
       throws Exception {
-    String inputFilePath = new File(Preconditions
-        .checkNotNull(IngestionSchemaValidatorTest.class.getClassLoader().getResource("data/test_sample_data.avro"))
-        .getFile()).toString();
+    String inputFilePath =
+        new File(Preconditions.checkNotNull(IngestionSchemaValidatorTest.class.getClassLoader().getResource("data/test_sample_data.avro")).getFile())
+            .toString();
     String recordReaderClassName = "org.apache.pinot.plugin.inputformat.avro.AvroRecordReader";
 
-    Schema pinotSchema = new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.LONG)
-        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
-        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("column7", FieldSpec.DataType.STRING)
-        .addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
+    Schema pinotSchema =
+        new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.LONG).addSingleValueDimension("column2", FieldSpec.DataType.INT)
+            .addSingleValueDimension("column3", FieldSpec.DataType.STRING).addSingleValueDimension("column7", FieldSpec.DataType.STRING)
+            .addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
 
-    IngestionSchemaValidator ingestionSchemaValidator =
-        SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
+    IngestionSchemaValidator ingestionSchemaValidator = SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
     Assert.assertNotNull(ingestionSchemaValidator);
     Assert.assertFalse(ingestionSchemaValidator.getDataTypeMismatchResult().isMismatchDetected());
     Assert.assertFalse(ingestionSchemaValidator.getSingleValueMultiValueFieldMismatchResult().isMismatchDetected());
@@ -53,15 +51,12 @@ public class IngestionSchemaValidatorTest {
     Assert.assertFalse(ingestionSchemaValidator.getMissingPinotColumnResult().isMismatchDetected());
 
     // Adding one extra column
-    pinotSchema = new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.LONG)
-        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
-        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("extra_column", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("column7", FieldSpec.DataType.STRING)
-        .addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
+    pinotSchema =
+        new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.LONG).addSingleValueDimension("column2", FieldSpec.DataType.INT)
+            .addSingleValueDimension("column3", FieldSpec.DataType.STRING).addSingleValueDimension("extra_column", FieldSpec.DataType.STRING)
+            .addSingleValueDimension("column7", FieldSpec.DataType.STRING).addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
 
-    ingestionSchemaValidator =
-        SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
+    ingestionSchemaValidator = SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
     Assert.assertNotNull(ingestionSchemaValidator);
     Assert.assertFalse(ingestionSchemaValidator.getDataTypeMismatchResult().isMismatchDetected());
     Assert.assertFalse(ingestionSchemaValidator.getSingleValueMultiValueFieldMismatchResult().isMismatchDetected());
@@ -70,13 +65,11 @@ public class IngestionSchemaValidatorTest {
     Assert.assertNotNull(ingestionSchemaValidator.getMissingPinotColumnResult().getMismatchReason());
 
     // Change the data type of column1 from LONG to STRING
-    pinotSchema = new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("column2", FieldSpec.DataType.INT)
-        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("column7", FieldSpec.DataType.STRING)
-        .addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
-    ingestionSchemaValidator =
-        SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
+    pinotSchema =
+        new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.STRING).addSingleValueDimension("column2", FieldSpec.DataType.INT)
+            .addSingleValueDimension("column3", FieldSpec.DataType.STRING).addSingleValueDimension("column7", FieldSpec.DataType.STRING)
+            .addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
+    ingestionSchemaValidator = SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
     Assert.assertNotNull(ingestionSchemaValidator);
     Assert.assertTrue(ingestionSchemaValidator.getDataTypeMismatchResult().isMismatchDetected());
     Assert.assertNotNull(ingestionSchemaValidator.getDataTypeMismatchResult().getMismatchReason());
@@ -85,13 +78,11 @@ public class IngestionSchemaValidatorTest {
     Assert.assertFalse(ingestionSchemaValidator.getMissingPinotColumnResult().isMismatchDetected());
 
     // Change column2 from single-value column to multi-value column
-    pinotSchema = new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.LONG)
-        .addMultiValueDimension("column2", FieldSpec.DataType.INT)
-        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("column7", FieldSpec.DataType.STRING)
-        .addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
-    ingestionSchemaValidator =
-        SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
+    pinotSchema =
+        new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.LONG).addMultiValueDimension("column2", FieldSpec.DataType.INT)
+            .addSingleValueDimension("column3", FieldSpec.DataType.STRING).addSingleValueDimension("column7", FieldSpec.DataType.STRING)
+            .addMetric("met_impressionCount", FieldSpec.DataType.LONG).build();
+    ingestionSchemaValidator = SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
     Assert.assertNotNull(ingestionSchemaValidator);
     Assert.assertFalse(ingestionSchemaValidator.getDataTypeMismatchResult().isMismatchDetected());
     Assert.assertTrue(ingestionSchemaValidator.getSingleValueMultiValueFieldMismatchResult().isMismatchDetected());
@@ -104,23 +95,20 @@ public class IngestionSchemaValidatorTest {
   @Test
   public void testAvroIngestionValidatorForMultiValueColumns()
       throws Exception {
-    String inputFilePath = new File(Preconditions.checkNotNull(
-        IngestionSchemaValidatorTest.class.getClassLoader().getResource("data/test_sample_data_multi_value.avro"))
-        .getFile()).toString();
+    String inputFilePath = new File(
+        Preconditions.checkNotNull(IngestionSchemaValidatorTest.class.getClassLoader().getResource("data/test_sample_data_multi_value.avro")).getFile())
+        .toString();
     String recordReaderClassName = "org.apache.pinot.plugin.inputformat.avro.AvroRecordReader";
 
     // column 2 is of int type in the AVRO.
     // column3 and column16 are both of array of map structure.
     // metric_not_found doesn't exist in input AVRO
-    Schema pinotSchema = new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("column2", FieldSpec.DataType.LONG)
-        .addSingleValueDimension("column3", FieldSpec.DataType.STRING)
-        .addMultiValueDimension("column16", FieldSpec.DataType.STRING)
-        .addMetric("metric_not_found", FieldSpec.DataType.LONG)
-        .addMetric("metric_nus_impressions", FieldSpec.DataType.LONG).build();
+    Schema pinotSchema =
+        new Schema.SchemaBuilder().addSingleValueDimension("column1", FieldSpec.DataType.STRING).addSingleValueDimension("column2", FieldSpec.DataType.LONG)
+            .addSingleValueDimension("column3", FieldSpec.DataType.STRING).addMultiValueDimension("column16", FieldSpec.DataType.STRING)
+            .addMetric("metric_not_found", FieldSpec.DataType.LONG).addMetric("metric_nus_impressions", FieldSpec.DataType.LONG).build();
 
-    IngestionSchemaValidator ingestionSchemaValidator =
-        SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
+    IngestionSchemaValidator ingestionSchemaValidator = SchemaValidatorFactory.getSchemaValidator(pinotSchema, recordReaderClassName, inputFilePath);
     Assert.assertNotNull(ingestionSchemaValidator);
     Assert.assertTrue(ingestionSchemaValidator.getDataTypeMismatchResult().isMismatchDetected());
     Assert.assertTrue(ingestionSchemaValidator.getSingleValueMultiValueFieldMismatchResult().isMismatchDetected());
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/io/PinotOutputFormatTest.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/io/PinotOutputFormatTest.java
index 79142b3..170781e 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/io/PinotOutputFormatTest.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-hadoop/src/test/java/org/apache/pinot/hadoop/io/PinotOutputFormatTest.java
@@ -70,10 +70,8 @@ public class PinotOutputFormatTest {
     PinotOutputFormat.setTempSegmentDir(job, tempSegmentDir.getAbsolutePath());
     TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).build();
     PinotOutputFormat.setTableConfig(job, tableConfig);
-    Schema schema =
-        new Schema.SchemaBuilder().setSchemaName(RAW_TABLE_NAME).addSingleValueDimension("id", FieldSpec.DataType.INT)
-            .addSingleValueDimension("name", FieldSpec.DataType.STRING).addMetric("salary", FieldSpec.DataType.INT)
-            .build();
+    Schema schema = new Schema.SchemaBuilder().setSchemaName(RAW_TABLE_NAME).addSingleValueDimension("id", FieldSpec.DataType.INT)
+        .addSingleValueDimension("name", FieldSpec.DataType.STRING).addMetric("salary", FieldSpec.DataType.INT).build();
     PinotOutputFormat.setSchema(job, schema);
     PinotOutputFormat.setFieldExtractorClass(job, JsonBasedFieldExtractor.class);
 
@@ -91,8 +89,7 @@ public class PinotOutputFormatTest {
 
     String segmentName = RAW_TABLE_NAME + "_0";
     File segmentDir = new File(TEMP_DIR, "segment");
-    File indexDir = TarGzCompressionUtils
-        .untar(new File(outputDir, segmentName + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION), segmentDir).get(0);
+    File indexDir = TarGzCompressionUtils.untar(new File(outputDir, segmentName + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION), segmentDir).get(0);
     RecordReader recordReader = new PinotSegmentRecordReader(indexDir, null, null);
     for (Employee record : records) {
       GenericRow row = recordReader.next();
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/pom.xml b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/pom.xml
index 8fea62a..284b698 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/pom.xml
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencies>
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/common/JobConfigConstants.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/common/JobConfigConstants.java
index 9c9238e..085bb14 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/common/JobConfigConstants.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/common/JobConfigConstants.java
@@ -19,6 +19,9 @@
 package org.apache.pinot.ingestion.common;
 
 public class JobConfigConstants {
+  private JobConfigConstants() {
+  }
+
   public static final String PATH_TO_INPUT = "path.to.input";
   public static final String PATH_TO_OUTPUT = "path.to.output";
   public static final String PREPROCESS_PATH_TO_OUTPUT = "preprocess.path.to.output";
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/utils/JobPreparationHelper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/utils/JobPreparationHelper.java
index f33eebc..5e65b20 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/utils/JobPreparationHelper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-ingestion-common/src/main/java/org/apache/pinot/ingestion/utils/JobPreparationHelper.java
@@ -27,6 +27,9 @@ import org.slf4j.LoggerFactory;
 
 
 public class JobPreparationHelper {
+  private JobPreparationHelper() {
+  }
+
   private static final Logger _logger = LoggerFactory.getLogger(JobPreparationHelper.class);
 
   public static void mkdirs(FileSystem fileSystem, Path dirPath, String defaultPermissionsMask)
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/pom.xml b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/pom.xml
index f2abdc5..13db9ea 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/pom.xml
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/pom.xml
@@ -38,9 +38,6 @@
     <scala.version>2.11.11</scala.version>
     <hadoop.version>2.8.3</hadoop.version>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <profiles>
     <profile>
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/PinotSparkJobLauncher.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/PinotSparkJobLauncher.java
index 74bec79..f3ce1ce 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/PinotSparkJobLauncher.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/PinotSparkJobLauncher.java
@@ -28,10 +28,11 @@ import org.apache.pinot.spark.jobs.SparkSegmentUriPushJob;
 
 
 public class PinotSparkJobLauncher {
+  private PinotSparkJobLauncher() {
+  }
 
   private static final String USAGE = "usage: [job_type] [job.properties]";
-  private static final String SUPPORT_JOB_TYPES =
-      "\tsupport job types: " + Arrays.toString(PinotIngestionJobType.values());
+  private static final String SUPPORT_JOB_TYPES = "\tsupport job types: " + Arrays.toString(PinotIngestionJobType.values());
 
   private static void usage() {
     System.err.println(USAGE);
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationFunction.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationFunction.java
index f23aff7..416ba6a 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationFunction.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationFunction.java
@@ -109,16 +109,13 @@ public class SparkSegmentCreationFunction implements Serializable {
     _recordReaderPath = _jobConf.get(JobConfigConstants.RECORD_READER_PATH);
 
     // Set up segment name generator
-    String segmentNameGeneratorType =
-        _jobConf.get(JobConfigConstants.SEGMENT_NAME_GENERATOR_TYPE, JobConfigConstants.DEFAULT_SEGMENT_NAME_GENERATOR);
+    String segmentNameGeneratorType = _jobConf.get(JobConfigConstants.SEGMENT_NAME_GENERATOR_TYPE, JobConfigConstants.DEFAULT_SEGMENT_NAME_GENERATOR);
     switch (segmentNameGeneratorType) {
       case JobConfigConstants.SIMPLE_SEGMENT_NAME_GENERATOR:
-        _segmentNameGenerator =
-            new SimpleSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_POSTFIX));
+        _segmentNameGenerator = new SimpleSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_POSTFIX));
         break;
       case JobConfigConstants.NORMALIZED_DATE_SEGMENT_NAME_GENERATOR:
-        Preconditions.checkState(_tableConfig != null,
-            "In order to use NormalizedDateSegmentNameGenerator, table config must be provided");
+        Preconditions.checkState(_tableConfig != null, "In order to use NormalizedDateSegmentNameGenerator, table config must be provided");
         SegmentsValidationAndRetentionConfig validationConfig = _tableConfig.getValidationConfig();
         DateTimeFormatSpec dateTimeFormatSpec = null;
         String timeColumnName = _tableConfig.getValidationConfig().getTimeColumnName();
@@ -128,11 +125,9 @@ public class SparkSegmentCreationFunction implements Serializable {
             dateTimeFormatSpec = new DateTimeFormatSpec(dateTimeFieldSpec.getFormat());
           }
         }
-        _segmentNameGenerator =
-            new NormalizedDateSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_PREFIX),
-                _jobConf.getBoolean(JobConfigConstants.EXCLUDE_SEQUENCE_ID, false),
-                IngestionConfigUtils.getBatchSegmentIngestionType(_tableConfig),
-                IngestionConfigUtils.getBatchSegmentIngestionFrequency(_tableConfig), dateTimeFormatSpec);
+        _segmentNameGenerator = new NormalizedDateSegmentNameGenerator(_rawTableName, _jobConf.get(JobConfigConstants.SEGMENT_NAME_PREFIX),
+            _jobConf.getBoolean(JobConfigConstants.EXCLUDE_SEQUENCE_ID, false), IngestionConfigUtils.getBatchSegmentIngestionType(_tableConfig),
+            IngestionConfigUtils.getBatchSegmentIngestionFrequency(_tableConfig), dateTimeFormatSpec);
         break;
       default:
         throw new UnsupportedOperationException("Unsupported segment name generator type: " + segmentNameGeneratorType);
@@ -149,8 +144,7 @@ public class SparkSegmentCreationFunction implements Serializable {
       _logger.warn("Deleting existing file: {}", _localStagingDir);
       FileUtils.forceDelete(_localStagingDir);
     }
-    _logger
-        .info("Making local temporary directories: {}, {}, {}", _localStagingDir, _localInputDir, _localSegmentTarDir);
+    _logger.info("Making local temporary directories: {}, {}, {}", _localStagingDir, _localInputDir, _localSegmentTarDir);
     Preconditions.checkState(_localStagingDir.mkdirs());
     Preconditions.checkState(_localInputDir.mkdir());
     Preconditions.checkState(_localSegmentDir.mkdir());
@@ -201,8 +195,7 @@ public class SparkSegmentCreationFunction implements Serializable {
     String inputFileName = hdfsInputFile.getName();
     File localInputFile = new File(_localInputDir, inputFileName);
     _logger.info("Copying input file from: {} to: {}", hdfsInputFile, localInputFile);
-    FileSystem.get(hdfsInputFile.toUri(), _jobConf)
-        .copyToLocalFile(hdfsInputFile, new Path(localInputFile.getAbsolutePath()));
+    FileSystem.get(hdfsInputFile.toUri(), _jobConf).copyToLocalFile(hdfsInputFile, new Path(localInputFile.getAbsolutePath()));
 
     SegmentGeneratorConfig segmentGeneratorConfig = new SegmentGeneratorConfig(_tableConfig, _schema);
     segmentGeneratorConfig.setTableName(_rawTableName);
@@ -229,8 +222,7 @@ public class SparkSegmentCreationFunction implements Serializable {
       driver.init(segmentGeneratorConfig);
       driver.build();
     } catch (Exception e) {
-      _logger.error("Caught exception while creating segment with HDFS input file: {}, sequence id: {}", hdfsInputFile,
-          sequenceId, e);
+      _logger.error("Caught exception while creating segment with HDFS input file: {}, sequence id: {}", hdfsInputFile, sequenceId, e);
       throw new RuntimeException(e);
     }
     String segmentName = driver.getSegmentName();
@@ -244,22 +236,19 @@ public class SparkSegmentCreationFunction implements Serializable {
 
     long uncompressedSegmentSize = FileUtils.sizeOf(localSegmentDir);
     long compressedSegmentSize = FileUtils.sizeOf(localSegmentTarFile);
-    _logger.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName,
-        DataSizeUtils.fromBytes(uncompressedSegmentSize), DataSizeUtils.fromBytes(compressedSegmentSize));
+    _logger.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName, DataSizeUtils.fromBytes(uncompressedSegmentSize),
+        DataSizeUtils.fromBytes(compressedSegmentSize));
 
     Path hdfsSegmentTarFile = new Path(_hdfsSegmentTarDir, segmentTarFileName);
     if (_useRelativePath) {
       Path relativeOutputPath =
-          getRelativeOutputPath(new Path(_jobConf.get(JobConfigConstants.PATH_TO_INPUT)).toUri(), hdfsInputFile.toUri(),
-              _hdfsSegmentTarDir);
+          getRelativeOutputPath(new Path(_jobConf.get(JobConfigConstants.PATH_TO_INPUT)).toUri(), hdfsInputFile.toUri(), _hdfsSegmentTarDir);
       hdfsSegmentTarFile = new Path(relativeOutputPath, segmentTarFileName);
     }
     _logger.info("Copying segment tar file from: {} to: {}", localSegmentTarFile, hdfsSegmentTarFile);
-    FileSystem.get(hdfsSegmentTarFile.toUri(), _jobConf)
-        .copyFromLocalFile(true, true, new Path(localSegmentTarFile.getAbsolutePath()), hdfsSegmentTarFile);
+    FileSystem.get(hdfsSegmentTarFile.toUri(), _jobConf).copyFromLocalFile(true, true, new Path(localSegmentTarFile.getAbsolutePath()), hdfsSegmentTarFile);
 
-    _logger.info("Finish generating segment: {} with HDFS input file: {}, sequence id: {}", segmentName, hdfsInputFile,
-        sequenceId);
+    _logger.info("Finish generating segment: {} with HDFS input file: {}, sequence id: {}", segmentName, hdfsInputFile, sequenceId);
   }
 
   protected FileFormat getFileFormat(String fileName) {
@@ -291,8 +280,7 @@ public class SparkSegmentCreationFunction implements Serializable {
       }
       if (fileFormat == FileFormat.THRIFT) {
         try (InputStream inputStream = FileSystem.get(_readerConfigFile.toUri(), _jobConf).open(_readerConfigFile)) {
-          ThriftRecordReaderConfig readerConfig =
-              JsonUtils.inputStreamToObject(inputStream, ThriftRecordReaderConfig.class);
+          ThriftRecordReaderConfig readerConfig = JsonUtils.inputStreamToObject(inputStream, ThriftRecordReaderConfig.class);
           _logger.info("Using Thrift record reader config: {}", readerConfig);
           return readerConfig;
         }
@@ -300,8 +288,7 @@ public class SparkSegmentCreationFunction implements Serializable {
 
       if (fileFormat == FileFormat.PROTO) {
         try (InputStream inputStream = FileSystem.get(_readerConfigFile.toUri(), _jobConf).open(_readerConfigFile)) {
-          ProtoBufRecordReaderConfig readerConfig =
-              JsonUtils.inputStreamToObject(inputStream, ProtoBufRecordReaderConfig.class);
+          ProtoBufRecordReaderConfig readerConfig = JsonUtils.inputStreamToObject(inputStream, ProtoBufRecordReaderConfig.class);
           _logger.info("Using Protocol Buffer record reader config: {}", readerConfig);
           return readerConfig;
         }
@@ -314,8 +301,7 @@ public class SparkSegmentCreationFunction implements Serializable {
    * Can be overridden to set additional segment generator configs.
    */
   @SuppressWarnings("unused")
-  protected void addAdditionalSegmentGeneratorConfigs(SegmentGeneratorConfig segmentGeneratorConfig, Path hdfsInputFile,
-      int sequenceId) {
+  protected void addAdditionalSegmentGeneratorConfigs(SegmentGeneratorConfig segmentGeneratorConfig, Path hdfsInputFile, int sequenceId) {
   }
 
   public void cleanup() {
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationJob.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationJob.java
index d3bc6c4..6bccacd 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationJob.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentCreationJob.java
@@ -56,8 +56,7 @@ public class SparkSegmentCreationJob extends SegmentCreationJob {
    * Can be overridden to set additional segment generator configs.
    */
   @SuppressWarnings("unused")
-  protected static void addAdditionalSegmentGeneratorConfigs(SegmentGeneratorConfig segmentGeneratorConfig,
-      Path hdfsInputFile, int sequenceId) {
+  protected static void addAdditionalSegmentGeneratorConfigs(SegmentGeneratorConfig segmentGeneratorConfig, Path hdfsInputFile, int sequenceId) {
   }
 
   public void run()
@@ -92,8 +91,7 @@ public class SparkSegmentCreationJob extends SegmentCreationJob {
       _logger.info("Creating segments with data files: {}", dataFilePaths);
       for (int i = 0; i < numDataFiles; i++) {
         Path dataFilePath = dataFilePaths.get(i);
-        try (DataOutputStream dataOutputStream = outputDirFileSystem
-            .create(new Path(stagingInputDir, Integer.toString(i)))) {
+        try (DataOutputStream dataOutputStream = outputDirFileSystem.create(new Path(stagingInputDir, Integer.toString(i)))) {
           dataOutputStream.write(StringUtil.encodeUtf8(dataFilePath.toString() + " " + i));
           dataOutputStream.flush();
         }
@@ -127,15 +125,13 @@ public class SparkSegmentCreationJob extends SegmentCreationJob {
         localDirIndex.get(parentPath.toString()).add(dataFilePath.toString());
       }
       pathRDD.foreach(path -> {
-        SparkSegmentCreationFunction sparkSegmentCreationFunction =
-            new SparkSegmentCreationFunction(_properties, new Path(_stagingDir, "output").toString());
+        SparkSegmentCreationFunction sparkSegmentCreationFunction = new SparkSegmentCreationFunction(_properties, new Path(_stagingDir, "output").toString());
         sparkSegmentCreationFunction.run(path, getLocalDirIndex(localDirIndex, path));
         sparkSegmentCreationFunction.cleanup();
       });
     } else {
       pathRDD.zipWithIndex().foreach(tuple2 -> {
-        SparkSegmentCreationFunction sparkSegmentCreationFunction =
-            new SparkSegmentCreationFunction(_properties, new Path(_stagingDir, "output").toString());
+        SparkSegmentCreationFunction sparkSegmentCreationFunction = new SparkSegmentCreationFunction(_properties, new Path(_stagingDir, "output").toString());
         sparkSegmentCreationFunction.run(tuple2._1, tuple2._2);
         sparkSegmentCreationFunction.cleanup();
       });
@@ -166,9 +162,7 @@ public class SparkSegmentCreationJob extends SegmentCreationJob {
       throws IOException {
     if (_depsJarDir != null) {
       Path depsJarPath = new Path(_depsJarDir);
-      PinotSparkJobPreparationHelper
-          .addDepsJarToDistributedCacheHelper(FileSystem.get(depsJarPath.toUri(), new Configuration()), sparkContext,
-              depsJarPath);
+      PinotSparkJobPreparationHelper.addDepsJarToDistributedCacheHelper(FileSystem.get(depsJarPath.toUri(), new Configuration()), sparkContext, depsJarPath);
     }
   }
 
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentTarPushJob.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentTarPushJob.java
index f77cdc0..114a4f6 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentTarPushJob.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentTarPushJob.java
@@ -43,10 +43,8 @@ public class SparkSegmentTarPushJob extends SegmentTarPushJob {
     super(properties);
     _enableParallelPush =
         Boolean.parseBoolean(properties.getProperty(JobConfigConstants.ENABLE_PARALLEL_PUSH, JobConfigConstants.DEFAULT_ENABLE_PARALLEL_PUSH));
-    _pushJobParallelism =
-        Integer.parseInt(properties.getProperty(JobConfigConstants.PUSH_JOB_PARALLELISM, JobConfigConstants.DEFAULT_PUSH_JOB_PARALLELISM));
-    _pushJobRetry =
-        Integer.parseInt(properties.getProperty(JobConfigConstants.PUSH_JOB_RETRY, JobConfigConstants.DEFAULT_PUSH_JOB_RETRY));
+    _pushJobParallelism = Integer.parseInt(properties.getProperty(JobConfigConstants.PUSH_JOB_PARALLELISM, JobConfigConstants.DEFAULT_PUSH_JOB_PARALLELISM));
+    _pushJobRetry = Integer.parseInt(properties.getProperty(JobConfigConstants.PUSH_JOB_RETRY, JobConfigConstants.DEFAULT_PUSH_JOB_RETRY));
   }
 
   @Override
@@ -73,8 +71,7 @@ public class SparkSegmentTarPushJob extends SegmentTarPushJob {
           List<String> currentSegments = controllerRestApi.getAllSegments("OFFLINE");
           controllerRestApi.pushSegments(fileSystem, Arrays.asList(new Path(segmentTarPath)));
           if (_deleteExtraSegments) {
-            controllerRestApi
-                .deleteSegmentUris(getSegmentsToDelete(currentSegments, Arrays.asList(new Path(segmentTarPath))));
+            controllerRestApi.deleteSegmentUris(getSegmentsToDelete(currentSegments, Arrays.asList(new Path(segmentTarPath))));
           }
         }
       });
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentUriPushJob.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentUriPushJob.java
index 8b14892..3378b07 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentUriPushJob.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/jobs/SparkSegmentUriPushJob.java
@@ -39,8 +39,7 @@ public class SparkSegmentUriPushJob extends SegmentUriPushJob {
     super(properties);
     _enableParallelPush =
         Boolean.parseBoolean(properties.getProperty(JobConfigConstants.ENABLE_PARALLEL_PUSH, JobConfigConstants.DEFAULT_ENABLE_PARALLEL_PUSH));
-    _pushJobParallelism =
-        Integer.parseInt(properties.getProperty(JobConfigConstants.PUSH_JOB_PARALLELISM, JobConfigConstants.DEFAULT_PUSH_JOB_PARALLELISM));
+    _pushJobParallelism = Integer.parseInt(properties.getProperty(JobConfigConstants.PUSH_JOB_PARALLELISM, JobConfigConstants.DEFAULT_PUSH_JOB_PARALLELISM));
   }
 
   @Override
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/utils/PinotSparkJobPreparationHelper.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/utils/PinotSparkJobPreparationHelper.java
index 5f1a676..8433cad 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/utils/PinotSparkJobPreparationHelper.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/main/java/org/apache/pinot/spark/utils/PinotSparkJobPreparationHelper.java
@@ -28,10 +28,12 @@ import org.slf4j.LoggerFactory;
 
 
 public class PinotSparkJobPreparationHelper {
+  private PinotSparkJobPreparationHelper() {
+  }
+
   private static final Logger _logger = LoggerFactory.getLogger(PinotSparkJobPreparationHelper.class);
 
-  public static void addDepsJarToDistributedCacheHelper(FileSystem fileSystem, JavaSparkContext sparkContext,
-      Path depsJarDir)
+  public static void addDepsJarToDistributedCacheHelper(FileSystem fileSystem, JavaSparkContext sparkContext, Path depsJarDir)
       throws IOException {
     FileStatus[] fileStatuses = fileSystem.listStatus(depsJarDir);
     for (FileStatus fileStatus : fileStatuses) {
diff --git a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/test/java/SegmentCreationSparkTest.java b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/test/java/org/apache/pinot/spark/SegmentCreationSparkTest.java
similarity index 96%
rename from pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/test/java/SegmentCreationSparkTest.java
rename to pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/test/java/org/apache/pinot/spark/SegmentCreationSparkTest.java
index c3aee93..9be6e6d 100644
--- a/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/test/java/SegmentCreationSparkTest.java
+++ b/pinot-plugins/pinot-batch-ingestion/v0_deprecated/pinot-spark/src/test/java/org/apache/pinot/spark/SegmentCreationSparkTest.java
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+package org.apache.pinot.spark;
 
 import com.google.common.base.Preconditions;
 import com.holdenkarau.spark.testing.SharedJavaSparkContext;
@@ -57,8 +58,7 @@ import static org.testng.Assert.assertTrue;
 
 public class SegmentCreationSparkTest extends SharedJavaSparkContext implements Serializable {
   private static final String SAMPLE_DATA_PATH =
-      Preconditions.checkNotNull(SegmentCreationSparkTest.class.getClassLoader().getResource("test_sample_data.csv"))
-          .getPath();
+      Preconditions.checkNotNull(SegmentCreationSparkTest.class.getClassLoader().getResource("test_sample_data.csv")).getPath();
   private static final File TEMP_DIR = new File(FileUtils.getTempDirectory(), "SegmentCreationSparkTest");
 
   @BeforeClass
@@ -73,8 +73,7 @@ public class SegmentCreationSparkTest extends SharedJavaSparkContext implements
     TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(tableName).build();
     Schema tableSchema = new Schema.SchemaBuilder().setSchemaName(tableName).build();
 
-    SparkConf conf = new SparkConf().setMaster("local").setAppName("test").set("spark.driver.host", "localhost")
-        .set("spark.ui.enabled", "false");
+    SparkConf conf = new SparkConf().setMaster("local").setAppName("test").set("spark.driver.host", "localhost").set("spark.ui.enabled", "false");
     JavaSparkContext jsc = new JavaSparkContext(conf);
     SQLContext sqlContext = new SQLContext(jsc);
 
diff --git a/pinot-plugins/pinot-environment/pinot-azure/src/main/java/org/apache/pinot/plugin/provider/AzureEnvironmentProvider.java b/pinot-plugins/pinot-environment/pinot-azure/src/main/java/org/apache/pinot/plugin/provider/AzureEnvironmentProvider.java
index c9c59d5..0c365ae 100644
--- a/pinot-plugins/pinot-environment/pinot-azure/src/main/java/org/apache/pinot/plugin/provider/AzureEnvironmentProvider.java
+++ b/pinot-plugins/pinot-environment/pinot-azure/src/main/java/org/apache/pinot/plugin/provider/AzureEnvironmentProvider.java
@@ -63,7 +63,7 @@ public class AzureEnvironmentProvider implements PinotEnvironmentProvider {
 
   public void init(PinotConfiguration pinotConfiguration) {
     Preconditions.checkArgument(0 < Integer.parseInt(pinotConfiguration.getProperty(MAX_RETRY)),
-         "[AzureEnvironmentProvider]: " + MAX_RETRY + " cannot be less than or equal to 0");
+        "[AzureEnvironmentProvider]: " + MAX_RETRY + " cannot be less than or equal to 0");
     Preconditions.checkArgument(!StringUtils.isBlank(pinotConfiguration.getProperty(IMDS_ENDPOINT)),
         "[AzureEnvironmentProvider]: " + IMDS_ENDPOINT + " should not be null or empty");
 
@@ -72,20 +72,15 @@ public class AzureEnvironmentProvider implements PinotEnvironmentProvider {
     int connectionTimeoutMillis = Integer.parseInt(pinotConfiguration.getProperty(CONNECTION_TIMEOUT_MILLIS));
     int requestTimeoutMillis = Integer.parseInt(pinotConfiguration.getProperty(REQUEST_TIMEOUT_MILLIS));
 
-    final RequestConfig requestConfig = RequestConfig.custom()
-        .setConnectTimeout(connectionTimeoutMillis)
-        .setConnectionRequestTimeout(requestTimeoutMillis)
-        .build();
+    final RequestConfig requestConfig =
+        RequestConfig.custom().setConnectTimeout(connectionTimeoutMillis).setConnectionRequestTimeout(requestTimeoutMillis).build();
 
-    final HttpRequestRetryHandler httpRequestRetryHandler = (iOException, executionCount, httpContext) ->
-        !(executionCount >= _maxRetry
-            || iOException instanceof InterruptedIOException
-            || iOException instanceof UnknownHostException
-            || iOException instanceof SSLException
-            || HttpClientContext.adapt(httpContext).getRequest() instanceof HttpEntityEnclosingRequest);
+    final HttpRequestRetryHandler httpRequestRetryHandler =
+        (iOException, executionCount, httpContext) -> !(executionCount >= _maxRetry || iOException instanceof InterruptedIOException
+            || iOException instanceof UnknownHostException || iOException instanceof SSLException || HttpClientContext.adapt(httpContext)
+            .getRequest() instanceof HttpEntityEnclosingRequest);
 
-    _closeableHttpClient =
-        HttpClients.custom().setDefaultRequestConfig(requestConfig).setRetryHandler(httpRequestRetryHandler).build();
+    _closeableHttpClient = HttpClients.custom().setDefaultRequestConfig(requestConfig).setRetryHandler(httpRequestRetryHandler).build();
   }
 
   // Constructor for test purposes.
@@ -93,8 +88,7 @@ public class AzureEnvironmentProvider implements PinotEnvironmentProvider {
   public AzureEnvironmentProvider(int maxRetry, String imdsEndpoint, CloseableHttpClient closeableHttpClient) {
     _maxRetry = maxRetry;
     _imdsEndpoint = imdsEndpoint;
-    _closeableHttpClient = Preconditions.checkNotNull(closeableHttpClient,
-        "[AzureEnvironmentProvider]: Closeable Http Client cannot be null");
+    _closeableHttpClient = Preconditions.checkNotNull(closeableHttpClient, "[AzureEnvironmentProvider]: Closeable Http Client cannot be null");
   }
 
   /**
@@ -116,8 +110,7 @@ public class AzureEnvironmentProvider implements PinotEnvironmentProvider {
       final JsonNode computeNode = jsonNode.path(COMPUTE);
 
       if (computeNode.isMissingNode()) {
-        throw new RuntimeException(
-            "[AzureEnvironmentProvider]: Compute node is missing in the payload. Cannot retrieve failure domain information");
+        throw new RuntimeException("[AzureEnvironmentProvider]: Compute node is missing in the payload. Cannot retrieve failure domain information");
       }
       final JsonNode platformFailureDomainNode = computeNode.path(PLATFORM_FAULT_DOMAIN);
       if (platformFailureDomainNode.isMissingNode() || !platformFailureDomainNode.isTextual()) {
@@ -127,8 +120,7 @@ public class AzureEnvironmentProvider implements PinotEnvironmentProvider {
       return platformFailureDomainNode.textValue();
     } catch (IOException ex) {
       throw new RuntimeException(
-          String.format("[AzureEnvironmentProvider]: Errors when parsing response payload from Azure Instance Metadata Service: %s",
-              responsePayload), ex);
+          String.format("[AzureEnvironmentProvider]: Errors when parsing response payload from Azure Instance Metadata Service: %s", responsePayload), ex);
     }
   }
 
@@ -146,15 +138,13 @@ public class AzureEnvironmentProvider implements PinotEnvironmentProvider {
       final StatusLine statusLine = closeableHttpResponse.getStatusLine();
       final int statusCode = statusLine.getStatusCode();
       if (statusCode != HttpStatus.SC_OK) {
-        final String errorMsg = String.format(
-            "[AzureEnvironmentProvider]: Failed to retrieve azure instance metadata. Response Status code: %s", statusCode);
+        final String errorMsg = String.format("[AzureEnvironmentProvider]: Failed to retrieve azure instance metadata. Response Status code: %s", statusCode);
         throw new RuntimeException(errorMsg);
       }
       return EntityUtils.toString(closeableHttpResponse.getEntity());
     } catch (IOException ex) {
       throw new RuntimeException(
-          String.format("[AzureEnvironmentProvider]: Failed to retrieve metadata from Azure Instance Metadata Service %s",
-              _imdsEndpoint), ex);
+          String.format("[AzureEnvironmentProvider]: Failed to retrieve metadata from Azure Instance Metadata Service %s", _imdsEndpoint), ex);
     }
   }
 }
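
Side note on the retry handler reflowed above: the single negated expression is equivalent to the expanded form below. This is only an illustrative sketch (the class name, method name, and maxRetry parameter are made up for the example), assuming Apache HttpClient 4.x as used in this module:

    import java.io.InterruptedIOException;
    import java.net.UnknownHostException;
    import javax.net.ssl.SSLException;
    import org.apache.http.HttpEntityEnclosingRequest;
    import org.apache.http.client.HttpRequestRetryHandler;
    import org.apache.http.client.protocol.HttpClientContext;

    // Expanded, step-by-step form of the retry lambda above (illustrative only).
    public class RetrySketch {
      static HttpRequestRetryHandler retryHandler(int maxRetry) {
        return (ioException, executionCount, httpContext) -> {
          if (executionCount >= maxRetry) {
            return false;  // retry budget exhausted
          }
          if (ioException instanceof InterruptedIOException || ioException instanceof UnknownHostException
              || ioException instanceof SSLException) {
            return false;  // failures that retrying will not fix
          }
          // Never replay requests that enclose a body (e.g. POST/PUT payloads).
          return !(HttpClientContext.adapt(httpContext).getRequest() instanceof HttpEntityEnclosingRequest);
        };
      }
    }

Written with early returns, it is easier to see that the handler only retries transient failures below the retry budget and never re-sends entity-enclosing requests.
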
diff --git a/pinot-plugins/pinot-environment/pinot-azure/src/test/java/org/apache/pinot/plugin/provider/AzureEnvironmentProviderTest.java b/pinot-plugins/pinot-environment/pinot-azure/src/test/java/org/apache/pinot/plugin/provider/AzureEnvironmentProviderTest.java
index b0113e3..2761981 100644
--- a/pinot-plugins/pinot-environment/pinot-azure/src/test/java/org/apache/pinot/plugin/provider/AzureEnvironmentProviderTest.java
+++ b/pinot-plugins/pinot-environment/pinot-azure/src/test/java/org/apache/pinot/plugin/provider/AzureEnvironmentProviderTest.java
@@ -33,10 +33,12 @@ import org.testng.Assert;
 import org.testng.annotations.BeforeMethod;
 import org.testng.annotations.Test;
 
-import static org.apache.http.HttpStatus.*;
-import static org.apache.pinot.plugin.provider.AzureEnvironmentProvider.*;
+import static org.apache.http.HttpStatus.SC_NOT_FOUND;
+import static org.apache.pinot.plugin.provider.AzureEnvironmentProvider.IMDS_ENDPOINT;
+import static org.apache.pinot.plugin.provider.AzureEnvironmentProvider.MAX_RETRY;
 import static org.mockito.Mockito.*;
-import static org.mockito.MockitoAnnotations.*;
+import static org.mockito.MockitoAnnotations.initMocks;
+
 
 /**
  * Unit test for {@link AzureEnvironmentProviderTest}
@@ -71,7 +73,8 @@ public class AzureEnvironmentProviderTest {
   }
 
   @Test
-  public void testFailureDomainRetrieval() throws IOException {
+  public void testFailureDomainRetrieval()
+      throws IOException {
     mockUtil();
     when(_mockHttpEntity.getContent()).thenReturn(getClass().getClassLoader().getResourceAsStream(IMDS_RESPONSE_FILE));
     String failureDomain = _azureEnvironmentProviderWithParams.getFailureDomain();
@@ -83,8 +86,8 @@ public class AzureEnvironmentProviderTest {
     verifyNoMoreInteractions(_mockHttpClient, _mockHttpResponse, _mockStatusLine);
   }
 
-  @Test(expectedExceptions = IllegalArgumentException.class,
-      expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: imdsEndpoint should not be null or empty")
+  @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: imdsEndpoint should not be "
+      + "null or empty")
   public void testInvalidIMDSEndpoint() {
     Map<String, Object> map = _pinotConfiguration.toMap();
     map.put(MAX_RETRY, "3");
@@ -93,8 +96,8 @@ public class AzureEnvironmentProviderTest {
     _azureEnvironmentProvider.init(pinotConfiguration);
   }
 
-  @Test(expectedExceptions = IllegalArgumentException.class,
-        expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: maxRetry cannot be less than or equal to 0")
+  @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: maxRetry cannot be less than "
+      + "or equal to 0")
   public void testInvalidRetryCount() {
     Map<String, Object> map = _pinotConfiguration.toMap();
     map.put(MAX_RETRY, "0");
@@ -102,43 +105,43 @@ public class AzureEnvironmentProviderTest {
     _azureEnvironmentProvider.init(pinotConfiguration);
   }
 
-  @Test(expectedExceptions = NullPointerException.class,
-      expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: Closeable Http Client cannot be null")
+  @Test(expectedExceptions = NullPointerException.class, expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: Closeable Http Client cannot be "
+      + "null")
   public void testInvalidHttpClient() {
     new AzureEnvironmentProvider(3, IMDS_ENDPOINT_VALUE, null);
   }
 
-  @Test(expectedExceptions = RuntimeException.class,
-      expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: Compute node is missing in the payload. "
-          + "Cannot retrieve failure domain information")
-  public void testMissingComputeNodeResponse() throws IOException {
+  @Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp =
+      "\\[AzureEnvironmentProvider\\]: Compute node is missing in the payload. Cannot retrieve failure domain information")
+  public void testMissingComputeNodeResponse()
+      throws IOException {
     mockUtil();
-    when(_mockHttpEntity.getContent())
-        .thenReturn(getClass().getClassLoader().getResourceAsStream(IMDS_RESPONSE_WITHOUT_COMPUTE_INFO));
+    when(_mockHttpEntity.getContent()).thenReturn(getClass().getClassLoader().getResourceAsStream(IMDS_RESPONSE_WITHOUT_COMPUTE_INFO));
     _azureEnvironmentProviderWithParams.getFailureDomain();
   }
 
-  @Test(expectedExceptions = RuntimeException.class,
-      expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: Json node platformFaultDomain is missing or is invalid."
+  @Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp =
+      "\\[AzureEnvironmentProvider\\]: Json node platformFaultDomain is missing or is invalid."
           + " No failure domain information retrieved for given server instance")
-  public void testMissingFaultDomainResponse() throws IOException {
+  public void testMissingFaultDomainResponse()
+      throws IOException {
     mockUtil();
-    when(_mockHttpEntity.getContent())
-        .thenReturn(getClass().getClassLoader().getResourceAsStream(IMDS_RESPONSE_WITHOUT_FAULT_DOMAIN_INFO));
+    when(_mockHttpEntity.getContent()).thenReturn(getClass().getClassLoader().getResourceAsStream(IMDS_RESPONSE_WITHOUT_FAULT_DOMAIN_INFO));
     _azureEnvironmentProviderWithParams.getFailureDomain();
   }
 
-  @Test(expectedExceptions = RuntimeException.class,
-      expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: Failed to retrieve azure instance metadata."
-          + " Response Status code: " + SC_NOT_FOUND)
-  public void testIMDSCallFailure() throws IOException {
+  @Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp =
+      "\\[AzureEnvironmentProvider\\]: Failed to retrieve azure instance metadata. Response Status code: " + SC_NOT_FOUND)
+  public void testIMDSCallFailure()
+      throws IOException {
     mockUtil();
     when(_mockStatusLine.getStatusCode()).thenReturn(SC_NOT_FOUND);
     _azureEnvironmentProviderWithParams.getFailureDomain();
   }
 
   // Mock Response utility method
-  private void mockUtil() throws IOException {
+  private void mockUtil()
+      throws IOException {
     when(_mockHttpClient.execute(any(HttpGet.class))).thenReturn(_mockHttpResponse);
     when(_mockHttpResponse.getStatusLine()).thenReturn(_mockStatusLine);
     when(_mockStatusLine.getStatusCode()).thenReturn(HttpStatus.SC_OK);
diff --git a/pinot-plugins/pinot-environment/pom.xml b/pinot-plugins/pinot-environment/pom.xml
index 2917729..fd0c55a 100644
--- a/pinot-plugins/pinot-environment/pom.xml
+++ b/pinot-plugins/pinot-environment/pom.xml
@@ -37,9 +37,6 @@
   <properties>
     <pinot.root>${basedir}/../..</pinot.root>
     <plugin.type>pinot-environment</plugin.type>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <modules>
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/pom.xml b/pinot-plugins/pinot-file-system/pinot-adls/pom.xml
index 1eb43f8..2087766 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/pom.xml
+++ b/pinot-plugins/pinot-file-system/pinot-adls/pom.xml
@@ -34,9 +34,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/ADLSGen2PinotFS.java b/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/ADLSGen2PinotFS.java
index ac37c00..a7a4412 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/ADLSGen2PinotFS.java
+++ b/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/ADLSGen2PinotFS.java
@@ -102,8 +102,7 @@ public class ADLSGen2PinotFS extends PinotFS {
   public ADLSGen2PinotFS() {
   }
 
-  public ADLSGen2PinotFS(DataLakeFileSystemClient fileSystemClient,
-      BlobServiceClient blobServiceClient) {
+  public ADLSGen2PinotFS(DataLakeFileSystemClient fileSystemClient, BlobServiceClient blobServiceClient) {
     _fileSystemClient = fileSystemClient;
     _blobServiceClient = blobServiceClient;
   }
@@ -128,18 +127,15 @@ public class ADLSGen2PinotFS extends PinotFS {
     DataLakeServiceClientBuilder dataLakeServiceClientBuilder = new DataLakeServiceClientBuilder().endpoint(dfsServiceEndpointUrl);
     BlobServiceClientBuilder blobServiceClientBuilder = new BlobServiceClientBuilder().endpoint(blobServiceEndpointUrl);
 
-    if (accountName!= null && accessKey != null) {
+    if (accountName != null && accessKey != null) {
       LOGGER.info("Authenticating using the access key to the account.");
       StorageSharedKeyCredential sharedKeyCredential = new StorageSharedKeyCredential(accountName, accessKey);
       dataLakeServiceClientBuilder.credential(sharedKeyCredential);
       blobServiceClientBuilder.credential(sharedKeyCredential);
     } else if (clientId != null && clientSecret != null && tenantId != null) {
       LOGGER.info("Authenticating using Azure Active Directory");
-      ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder()
-          .clientId(clientId)
-          .clientSecret(clientSecret)
-          .tenantId(tenantId)
-          .build();
+      ClientSecretCredential clientSecretCredential =
+          new ClientSecretCredentialBuilder().clientId(clientId).clientSecret(clientSecret).tenantId(tenantId).build();
       dataLakeServiceClientBuilder.credential(clientSecretCredential);
       blobServiceClientBuilder.credential(clientSecretCredential);
     } else {
@@ -151,9 +147,9 @@ public class ADLSGen2PinotFS extends PinotFS {
     DataLakeServiceClient serviceClient = dataLakeServiceClientBuilder.buildClient();
     _fileSystemClient = getOrCreateClientWithFileSystem(serviceClient, fileSystemName);
 
-    LOGGER.info("ADLSGen2PinotFS is initialized (accountName={}, fileSystemName={}, dfsServiceEndpointUrl={}, "
-            + "blobServiceEndpointUrl={}, enableChecksum={})", accountName, fileSystemName, dfsServiceEndpointUrl,
-        blobServiceEndpointUrl, _enableChecksum);
+    LOGGER
+        .info("ADLSGen2PinotFS is initialized (accountName={}, fileSystemName={}, dfsServiceEndpointUrl={}, " + "blobServiceEndpointUrl={}, enableChecksum={})",
+            accountName, fileSystemName, dfsServiceEndpointUrl, blobServiceEndpointUrl, _enableChecksum);
   }
 
   /**
@@ -164,8 +160,7 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return DataLakeFileSystemClient with the specified fileSystemName.
    */
   @VisibleForTesting
-  public DataLakeFileSystemClient getOrCreateClientWithFileSystem(DataLakeServiceClient serviceClient,
-      String fileSystemName) {
+  public DataLakeFileSystemClient getOrCreateClientWithFileSystem(DataLakeServiceClient serviceClient, String fileSystemName) {
     try {
       DataLakeFileSystemClient fileSystemClient = serviceClient.getFileSystemClient(fileSystemName);
       // The return value is irrelevant. This is to test if the filesystem exists.
@@ -188,14 +183,15 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return true if creation succeeds else false.
    */
   @Override
-  public boolean mkdir(URI uri) throws IOException {
+  public boolean mkdir(URI uri)
+      throws IOException {
     LOGGER.debug("mkdir is called with uri='{}'", uri);
     try {
       // By default, create directory call will overwrite if the path already exists. Setting IfNoneMatch = "*" to
       // prevent overwrite. https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create
       DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setIfNoneMatch("*");
-      _fileSystemClient.createDirectoryWithResponse(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(uri), null,
-          null, null, null, requestConditions, null, null);
+      _fileSystemClient
+          .createDirectoryWithResponse(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(uri), null, null, null, null, requestConditions, null, null);
       return true;
     } catch (DataLakeStorageException e) {
       // If the path already exists, doing nothing and return true
@@ -215,7 +211,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return true if deletion succeeds else false.
    */
   @Override
-  public boolean delete(URI segmentUri, boolean forceDelete) throws IOException {
+  public boolean delete(URI segmentUri, boolean forceDelete)
+      throws IOException {
     LOGGER.debug("delete is called with segmentUri='{}', forceDelete='{}'", segmentUri, forceDelete);
     try {
       boolean isDirectory = isDirectory(segmentUri);
@@ -243,11 +240,11 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return true if move succeeds else false.
    */
   @Override
-  public boolean doMove(URI srcUri, URI dstUri) throws IOException {
+  public boolean doMove(URI srcUri, URI dstUri)
+      throws IOException {
     LOGGER.debug("doMove is called with srcUri='{}', dstUri='{}'", srcUri, dstUri);
     try {
-      DataLakeDirectoryClient directoryClient =
-          _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(srcUri));
+      DataLakeDirectoryClient directoryClient = _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(srcUri));
       directoryClient.rename(null, AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(dstUri));
       return true;
     } catch (DataLakeStorageException e) {
@@ -263,7 +260,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return true if move succeeds else false.
    */
   @Override
-  public boolean copy(URI srcUri, URI dstUri) throws IOException {
+  public boolean copy(URI srcUri, URI dstUri)
+      throws IOException {
     LOGGER.debug("copy is called with srcUri='{}', dstUri='{}'", srcUri, dstUri);
     // If src and dst are the same, do nothing.
     if (srcUri.equals(dstUri)) {
@@ -315,10 +313,10 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return true if exists else false.
    */
   @Override
-  public boolean exists(URI fileUri) throws IOException {
+  public boolean exists(URI fileUri)
+      throws IOException {
     try {
-      _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(fileUri))
-          .getProperties();
+      _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(fileUri)).getProperties();
       return true;
     } catch (DataLakeStorageException e) {
       if (e.getStatusCode() == NOT_FOUND_STATUS_CODE) {
@@ -335,11 +333,10 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return size of the file
    */
   @Override
-  public long length(URI fileUri) throws IOException {
+  public long length(URI fileUri)
+      throws IOException {
     try {
-      PathProperties pathProperties =
-          _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(fileUri))
-              .getProperties();
+      PathProperties pathProperties = _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(fileUri)).getProperties();
       return pathProperties.getFileSize();
     } catch (DataLakeStorageException e) {
       throw new IOException(e);
@@ -354,18 +351,16 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return array of all the files in the target directory.
    */
   @Override
-  public String[] listFiles(URI fileUri, boolean recursive) throws IOException {
+  public String[] listFiles(URI fileUri, boolean recursive)
+      throws IOException {
     LOGGER.debug("listFiles is called with fileUri='{}', recursive='{}'", fileUri, recursive);
     try {
       // Unlike other Azure SDK APIs that takes url encoded path, ListPathsOptions takes decoded url
       // e.g) 'path/segment' instead of 'path%2Fsegment'
-      String pathForListPathsOptions =
-          Utility.urlDecode(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(fileUri));
+      String pathForListPathsOptions = Utility.urlDecode(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(fileUri));
       ListPathsOptions options = new ListPathsOptions().setPath(pathForListPathsOptions).setRecursive(recursive);
       PagedIterable<PathItem> iter = _fileSystemClient.listPaths(options, null);
-      return iter.stream()
-          .map(p -> AzurePinotFSUtil.convertAzureStylePathToUriStylePath(p.getName()))
-          .toArray(String[]::new);
+      return iter.stream().map(p -> AzurePinotFSUtil.convertAzureStylePathToUriStylePath(p.getName())).toArray(String[]::new);
     } catch (DataLakeStorageException e) {
       throw new IOException(e);
     }
@@ -379,7 +374,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return nothing.
    */
   @Override
-  public void copyToLocalFile(URI srcUri, File dstFile) throws Exception {
+  public void copyToLocalFile(URI srcUri, File dstFile)
+      throws Exception {
     LOGGER.debug("copyToLocalFile is called with srcUri='{}', dstFile='{}'", srcUri, dstFile);
     if (dstFile.exists()) {
       if (dstFile.isDirectory()) {
@@ -399,8 +395,7 @@ public class ADLSGen2PinotFS extends PinotFS {
     }
     // If MD5 hash is available as part of path properties, verify it with the local file
     if (_enableChecksum) {
-      DataLakeFileClient fileClient =
-          _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(srcUri));
+      DataLakeFileClient fileClient = _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(srcUri));
       byte[] md5ContentFromMetadata = fileClient.getProperties().getContentMd5();
       if (md5ContentFromMetadata != null && md5ContentFromMetadata.length > 0) {
         byte[] md5FromLocalFile = computeContentMd5(dstFile);
@@ -421,7 +416,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return nothing.
    */
   @Override
-  public void copyFromLocalFile(File srcFile, URI dstUri) throws Exception {
+  public void copyFromLocalFile(File srcFile, URI dstUri)
+      throws Exception {
     LOGGER.debug("copyFromLocalFile is called with srcFile='{}', dstUri='{}'", srcFile, dstUri);
     byte[] contentMd5 = computeContentMd5(srcFile);
     try (InputStream fileInputStream = new FileInputStream(srcFile)) {
@@ -436,7 +432,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return true if it's a directory else false.
    */
   @Override
-  public boolean isDirectory(URI uri) throws IOException {
+  public boolean isDirectory(URI uri)
+      throws IOException {
     try {
       PathProperties pathProperties = getPathProperties(uri);
       Map<String, String> metadata = pathProperties.getMetadata();
@@ -456,7 +453,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return the last modified time of the target file.
    */
   @Override
-  public long lastModified(URI uri) throws IOException {
+  public long lastModified(URI uri)
+      throws IOException {
     try {
       PathProperties pathProperties = getPathProperties(uri);
       OffsetDateTime offsetDateTime = pathProperties.getLastModified();
@@ -474,7 +472,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return true if touch succeeds else false.
    */
   @Override
-  public boolean touch(URI uri) throws IOException {
+  public boolean touch(URI uri)
+      throws IOException {
     // The following data lake gen2 API provides a way to update file properties including last modified time.
     // https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update
     // However, action = "setProperties" is available in REST API but not available in Java SDK yet.
@@ -482,8 +481,7 @@ public class ADLSGen2PinotFS extends PinotFS {
     // For now, directly use Blob service's API to get the same effect.
     // https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-properties
     try {
-      DataLakeFileClient fileClient =
-          _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(uri));
+      DataLakeFileClient fileClient = _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(uri));
       PathProperties pathProperties = fileClient.getProperties();
       fileClient.setHttpHeaders(getPathHttpHeaders(pathProperties));
       return true;
@@ -499,7 +497,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return the input stream with the contents of the file.
    */
   @Override
-  public InputStream open(URI uri) throws IOException {
+  public InputStream open(URI uri)
+      throws IOException {
     // Use Blob API since read() function from Data Lake Client currently takes "OutputStream" as an input and
     // flush bytes to an output stream. This needs to be piped back into input stream to implement this function.
     // On the other hand, Blob API directly allow you to open the input stream.
@@ -511,9 +510,9 @@ public class ADLSGen2PinotFS extends PinotFS {
     // this case, we need to override "close()" and delete temp file.
   }
 
-  private boolean copySrcToDst(URI srcUri, URI dstUri) throws IOException {
-    PathProperties pathProperties =
-        _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(srcUri)).getProperties();
+  private boolean copySrcToDst(URI srcUri, URI dstUri)
+      throws IOException {
+    PathProperties pathProperties = _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(srcUri)).getProperties();
     try (InputStream inputStream = open(srcUri)) {
       return copyInputStreamToDst(inputStream, dstUri, pathProperties.getContentMd5());
     }
@@ -533,8 +532,7 @@ public class ADLSGen2PinotFS extends PinotFS {
     int bytesRead;
     long totalBytesRead = 0;
     byte[] buffer = new byte[BUFFER_SIZE];
-    DataLakeFileClient fileClient =
-        _fileSystemClient.createFile(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(dstUri));
+    DataLakeFileClient fileClient = _fileSystemClient.createFile(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(dstUri));
 
     // Update MD5 metadata
     if (contentMd5 != null) {
@@ -554,8 +552,7 @@ public class ADLSGen2PinotFS extends PinotFS {
         }
         // Upload 4MB at a time since Azure's limit for each append call is 4MB.
         ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(buffer, 0, bytesRead);
-        fileClient.appendWithResponse(byteArrayInputStream, totalBytesRead, bytesRead, md5BlockHash, null, null,
-            Context.NONE);
+        fileClient.appendWithResponse(byteArrayInputStream, totalBytesRead, bytesRead, md5BlockHash, null, null, Context.NONE);
         byteArrayInputStream.close();
         totalBytesRead += bytesRead;
       }
@@ -574,7 +571,8 @@ public class ADLSGen2PinotFS extends PinotFS {
    * @return byte array of md5 hash
    * @throws Exception
    */
-  private byte[] computeContentMd5(File file) throws Exception{
+  private byte[] computeContentMd5(File file)
+      throws Exception {
     MessageDigest messageDigest = MessageDigest.getInstance("MD5");
     int bytesRead;
     byte[] buffer = new byte[BUFFER_SIZE];
@@ -586,17 +584,14 @@ public class ADLSGen2PinotFS extends PinotFS {
     return messageDigest.digest();
   }
 
-  private PathProperties getPathProperties(URI uri) throws IOException {
-    return _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(uri))
-        .getProperties();
+  private PathProperties getPathProperties(URI uri)
+      throws IOException {
+    return _fileSystemClient.getDirectoryClient(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(uri)).getProperties();
   }
 
   private PathHttpHeaders getPathHttpHeaders(PathProperties pathProperties) {
-    return new PathHttpHeaders().setCacheControl(pathProperties.getCacheControl())
-        .setContentDisposition(pathProperties.getContentDisposition())
-        .setContentEncoding(pathProperties.getContentEncoding())
-        .setContentMd5(pathProperties.getContentMd5())
-        .setContentLanguage(pathProperties.getContentLanguage())
-        .setContentType(pathProperties.getContentType());
+    return new PathHttpHeaders().setCacheControl(pathProperties.getCacheControl()).setContentDisposition(pathProperties.getContentDisposition())
+        .setContentEncoding(pathProperties.getContentEncoding()).setContentMd5(pathProperties.getContentMd5())
+        .setContentLanguage(pathProperties.getContentLanguage()).setContentType(pathProperties.getContentType());
   }
 }
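
For context on the checksum handling reflowed above: the MD5 is computed over the file in buffer-sized chunks and then compared against the Content-MD5 stored in the path properties. A minimal JDK-only sketch of that idea follows; the class name, method name, and the 4MB constant mirroring the append block size are illustrative, not part of the commit:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.MessageDigest;

    // Illustrative sketch only: digest a file in fixed-size chunks and compare it
    // against an MD5 fetched from object metadata (expectedMd5).
    public class Md5CheckSketch {
      private static final int BUFFER_SIZE = 4 * 1024 * 1024;  // 4MB, matching the append block size above

      static boolean matchesExpectedMd5(File file, byte[] expectedMd5) throws Exception {
        MessageDigest messageDigest = MessageDigest.getInstance("MD5");
        byte[] buffer = new byte[BUFFER_SIZE];
        int bytesRead;
        try (InputStream in = new FileInputStream(file)) {
          while ((bytesRead = in.read(buffer)) != -1) {
            messageDigest.update(buffer, 0, bytesRead);  // fold each chunk into the running digest
          }
        }
        return MessageDigest.isEqual(messageDigest.digest(), expectedMd5);  // JDK-provided comparison
      }
    }
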
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFS.java b/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFS.java
index dfe5793..121da17 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFS.java
+++ b/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFS.java
@@ -127,9 +127,7 @@ public class AzurePinotFS extends PinotFS {
       inputStream.close();
       outputStream.close();
     } catch (IOException e) {
-      LOGGER
-          .error("Exception encountered during copy, input: '{}', output: '{}'.", srcUri.toString(), dstUri.toString(),
-              e);
+      LOGGER.error("Exception encountered during copy, input: '{}', output: '{}'.", srcUri.toString(), dstUri.toString(), e);
     }
     return true;
   }
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFSUtil.java b/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFSUtil.java
index 2cf50e6..282b434 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFSUtil.java
+++ b/pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/AzurePinotFSUtil.java
@@ -43,7 +43,8 @@ public class AzurePinotFSUtil {
    * @return path in Azure Data Lake Gen2 format
    * @throws IOException
    */
-  public static String convertUriToAzureStylePath(URI uri) throws IOException {
+  public static String convertUriToAzureStylePath(URI uri)
+      throws IOException {
     // Pinot side code uses `URLEncoder` when building uri
     String path = URLDecoder.decode(uri.getRawPath(), "UTF-8");
     if (path.startsWith(DIRECTORY_DELIMITER)) {
@@ -65,7 +66,8 @@ public class AzurePinotFSUtil {
    * @return url encoded path in Azure Data Lake Gen2 format
    * @throws IOException
    */
-  public static String convertUriToUrlEncodedAzureStylePath(URI uri) throws IOException {
+  public static String convertUriToUrlEncodedAzureStylePath(URI uri)
+      throws IOException {
     return Utility.urlEncode(convertUriToAzureStylePath(uri));
   }
 
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/ADLSGen2PinotFSTest.java b/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/ADLSGen2PinotFSTest.java
index 8085988..e0c6637 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/ADLSGen2PinotFSTest.java
+++ b/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/ADLSGen2PinotFSTest.java
@@ -88,7 +88,8 @@ public class ADLSGen2PinotFSTest {
   private final static String MOCK_FILE_SYSTEM_NAME = "fileSystemName";
 
   @BeforeMethod
-  public void setup() throws URISyntaxException {
+  public void setup()
+      throws URISyntaxException {
     MockitoAnnotations.initMocks(this);
     _adlsGen2PinotFsUnderTest = new ADLSGen2PinotFS(_mockFileSystemClient, _mockBlobServiceClient);
     _mockURI = new URI("mock://mock");
@@ -96,9 +97,8 @@ public class ADLSGen2PinotFSTest {
 
   @AfterMethod
   public void tearDown() {
-    verifyNoMoreInteractions(_mockDataLakeStorageException, _mockServiceClient, _mockFileSystemClient,
-        _mockSimpleResponse, _mockDirectoryClient, _mockPathItem, _mockPagedIterable, _mockPathProperties,
-        _mockFileClient, _mockBlobContainerClient, _mockBlobClient, _mockBlobServiceClient, _mockBlobInputStream);
+    verifyNoMoreInteractions(_mockDataLakeStorageException, _mockServiceClient, _mockFileSystemClient, _mockSimpleResponse, _mockDirectoryClient, _mockPathItem,
+        _mockPagedIterable, _mockPathProperties, _mockFileClient, _mockBlobContainerClient, _mockBlobClient, _mockBlobServiceClient, _mockBlobInputStream);
   }
 
   @Test(expectedExceptions = IllegalArgumentException.class)
@@ -112,8 +112,7 @@ public class ADLSGen2PinotFSTest {
     when(_mockServiceClient.getFileSystemClient(MOCK_FILE_SYSTEM_NAME)).thenReturn(_mockFileSystemClient);
     when(_mockFileSystemClient.getProperties()).thenReturn(null);
 
-    final DataLakeFileSystemClient actual =
-        _adlsGen2PinotFsUnderTest.getOrCreateClientWithFileSystem(_mockServiceClient, MOCK_FILE_SYSTEM_NAME);
+    final DataLakeFileSystemClient actual = _adlsGen2PinotFsUnderTest.getOrCreateClientWithFileSystem(_mockServiceClient, MOCK_FILE_SYSTEM_NAME);
     Assert.assertEquals(actual, _mockFileSystemClient);
 
     verify(_mockFileSystemClient).getProperties();
@@ -128,8 +127,7 @@ public class ADLSGen2PinotFSTest {
     when(_mockDataLakeStorageException.getStatusCode()).thenReturn(404);
     when(_mockDataLakeStorageException.getErrorCode()).thenReturn("ContainerNotFound");
 
-    final DataLakeFileSystemClient actual =
-        _adlsGen2PinotFsUnderTest.getOrCreateClientWithFileSystem(_mockServiceClient, MOCK_FILE_SYSTEM_NAME);
+    final DataLakeFileSystemClient actual = _adlsGen2PinotFsUnderTest.getOrCreateClientWithFileSystem(_mockServiceClient, MOCK_FILE_SYSTEM_NAME);
     Assert.assertEquals(actual, _mockFileSystemClient);
 
     verify(_mockFileSystemClient).getProperties();
@@ -140,9 +138,9 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testMkDirHappy() throws IOException {
-    when(_mockFileSystemClient.createDirectoryWithResponse(any(), any(), any(), any(), any(), any(), any(), any()))
-        .thenReturn(_mockSimpleResponse);
+  public void testMkDirHappy()
+      throws IOException {
+    when(_mockFileSystemClient.createDirectoryWithResponse(any(), any(), any(), any(), any(), any(), any(), any())).thenReturn(_mockSimpleResponse);
 
     boolean actual = _adlsGen2PinotFsUnderTest.mkdir(_mockURI);
     Assert.assertTrue(actual);
@@ -151,9 +149,9 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testMkDirPathExists() throws IOException {
-    when(_mockFileSystemClient.createDirectoryWithResponse(any(), any(), any(), any(), any(), any(), any(), any()))
-        .thenThrow(_mockDataLakeStorageException);
+  public void testMkDirPathExists()
+      throws IOException {
+    when(_mockFileSystemClient.createDirectoryWithResponse(any(), any(), any(), any(), any(), any(), any(), any())).thenThrow(_mockDataLakeStorageException);
     when(_mockDataLakeStorageException.getStatusCode()).thenReturn(409);
     when(_mockDataLakeStorageException.getErrorCode()).thenReturn("PathAlreadyExists");
 
@@ -166,7 +164,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testIsDirectory() throws IOException {
+  public void testIsDirectory()
+      throws IOException {
     final HashMap<String, String> metadata = new HashMap<>();
     metadata.put("hdi_isfolder", "true");
 
@@ -183,7 +182,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testListFiles() throws IOException {
+  public void testListFiles()
+      throws IOException {
     when(_mockFileSystemClient.listPaths(any(), any())).thenReturn(_mockPagedIterable);
     when(_mockPagedIterable.stream()).thenReturn(Collections.singletonList(_mockPathItem).stream());
     when(_mockPathItem.getName()).thenReturn("foo");
@@ -206,7 +206,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testDeleteDirectory() throws IOException {
+  public void testDeleteDirectory()
+      throws IOException {
     final HashMap<String, String> metadata = new HashMap<>();
     metadata.put("hdi_isfolder", "true");
 
@@ -216,8 +217,7 @@ public class ADLSGen2PinotFSTest {
     when(_mockFileSystemClient.listPaths(any(), any())).thenReturn(_mockPagedIterable);
     when(_mockPagedIterable.stream()).thenReturn(Collections.singletonList(_mockPathItem).stream());
     when(_mockPathItem.getName()).thenReturn("foo");
-    when(_mockFileSystemClient.deleteDirectoryWithResponse(eq(""), eq(true), eq(null), eq(null), eq(Context.NONE)))
-        .thenReturn(_mockSimpleResponse);
+    when(_mockFileSystemClient.deleteDirectoryWithResponse(eq(""), eq(true), eq(null), eq(null), eq(Context.NONE))).thenReturn(_mockSimpleResponse);
     when(_mockSimpleResponse.getValue()).thenReturn(null);
 
     boolean actual = _adlsGen2PinotFsUnderTest.delete(_mockURI, true);
@@ -234,7 +234,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testDeleteFile() throws IOException {
+  public void testDeleteFile()
+      throws IOException {
     final HashMap<String, String> metadata = new HashMap<>();
     metadata.put("hdi_isfolder", "false");
 
@@ -253,7 +254,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testDoMove() throws IOException {
+  public void testDoMove()
+      throws IOException {
     when(_mockFileSystemClient.getDirectoryClient(any())).thenReturn(_mockDirectoryClient);
     when(_mockDirectoryClient.rename(eq(null), any())).thenReturn(_mockDirectoryClient);
 
@@ -274,7 +276,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testExistsTrue() throws IOException {
+  public void testExistsTrue()
+      throws IOException {
     when(_mockFileSystemClient.getDirectoryClient(any())).thenReturn(_mockDirectoryClient);
     when(_mockDirectoryClient.getProperties()).thenReturn(_mockPathProperties);
 
@@ -286,7 +289,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testExistsFalse() throws IOException {
+  public void testExistsFalse()
+      throws IOException {
     when(_mockFileSystemClient.getDirectoryClient(any())).thenReturn(_mockDirectoryClient);
     when(_mockDirectoryClient.getProperties()).thenThrow(_mockDataLakeStorageException);
     when(_mockDataLakeStorageException.getStatusCode()).thenReturn(404);
@@ -300,7 +304,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testExistsException() throws IOException {
+  public void testExistsException()
+      throws IOException {
     when(_mockFileSystemClient.getDirectoryClient(any())).thenReturn(_mockDirectoryClient);
     when(_mockDirectoryClient.getProperties()).thenThrow(_mockDataLakeStorageException);
     when(_mockDataLakeStorageException.getStatusCode()).thenReturn(123);
@@ -313,7 +318,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testLength() throws IOException {
+  public void testLength()
+      throws IOException {
     final long testLength = 42;
     when(_mockFileSystemClient.getDirectoryClient(any())).thenReturn(_mockDirectoryClient);
     when(_mockDirectoryClient.getProperties()).thenReturn(_mockPathProperties);
@@ -339,7 +345,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void testTouch() throws IOException {
+  public void testTouch()
+      throws IOException {
     when(_mockFileSystemClient.getFileClient(any())).thenReturn(_mockFileClient);
     when(_mockFileClient.getProperties()).thenReturn(_mockPathProperties);
     doNothing().when(_mockFileClient).setHttpHeaders(any());
@@ -378,7 +385,8 @@ public class ADLSGen2PinotFSTest {
   }
 
   @Test
-  public void open() throws IOException {
+  public void open()
+      throws IOException {
     when(_mockFileSystemClient.getFileSystemName()).thenReturn(MOCK_FILE_SYSTEM_NAME);
     when(_mockBlobServiceClient.getBlobContainerClient(MOCK_FILE_SYSTEM_NAME)).thenReturn(_mockBlobContainerClient);
     when(_mockBlobContainerClient.getBlobClient(any())).thenReturn(_mockBlobClient);
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSTest.java b/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSTest.java
index 1c3be6d..6aa4a61 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSTest.java
+++ b/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSTest.java
@@ -43,8 +43,7 @@ public class AzurePinotFSTest {
   @BeforeMethod
   public void setup()
       throws IOException {
-    _adlLocation =
-        new File(System.getProperty("java.io.tmpdir"), AzurePinotFSTest.class.getSimpleName()).getAbsolutePath();
+    _adlLocation = new File(System.getProperty("java.io.tmpdir"), AzurePinotFSTest.class.getSimpleName()).getAbsolutePath();
     FileUtils.deleteQuietly(new File(_adlLocation));
     Assert.assertTrue(new File(_adlLocation).mkdir(), "Could not make directory" + _adlLocation);
 
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSUtilTest.java b/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSUtilTest.java
index ef02718..47d85f8 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSUtilTest.java
+++ b/pinot-plugins/pinot-file-system/pinot-adls/src/test/java/org/apache/pinot/plugin/filesystem/test/AzurePinotFSUtilTest.java
@@ -32,7 +32,8 @@ public class AzurePinotFSUtilTest {
   private static final String BASE_PATH = "abfss://test.dfs.core.windows.net";
 
   @Test
-  public void testConvertUriToAzureStylePath() throws Exception {
+  public void testConvertUriToAzureStylePath()
+      throws Exception {
     testUriToAzureStylePath("table_0", "segment_1", false);
     testUriToAzureStylePath("table_0", "segment %", false);
     testUriToAzureStylePath("table %", "segment_1", false);
@@ -40,7 +41,8 @@ public class AzurePinotFSUtilTest {
   }
 
   @Test
-  public void testConvertUriToUrlEncodedAzureStylePath() throws Exception {
+  public void testConvertUriToUrlEncodedAzureStylePath()
+      throws Exception {
     testUriToAzureStylePath("table_0", "segment_1", true);
     testUriToAzureStylePath("table_0", "segment %", true);
     testUriToAzureStylePath("table %", "segment_1", true);
@@ -48,7 +50,8 @@ public class AzurePinotFSUtilTest {
   }
 
   @Test
-  public void testConvertAzureStylePathToUriStylePath() throws Exception {
+  public void testConvertAzureStylePathToUriStylePath()
+      throws Exception {
     Assert.assertEquals(AzurePinotFSUtil.convertAzureStylePathToUriStylePath("a/b"), "/a/b");
     Assert.assertEquals(AzurePinotFSUtil.convertAzureStylePathToUriStylePath("a/b/"), "/a/b");
     Assert.assertEquals(AzurePinotFSUtil.convertAzureStylePathToUriStylePath("/a/b"), "/a/b");
@@ -60,7 +63,8 @@ public class AzurePinotFSUtilTest {
     Assert.assertEquals(AzurePinotFSUtil.convertAzureStylePathToUriStylePath("/table/segment %/"), "/table/segment %");
   }
 
-  public void testUriToAzureStylePath(String tableName, String segmentName, boolean urlEncoded) throws Exception {
+  public void testUriToAzureStylePath(String tableName, String segmentName, boolean urlEncoded)
+      throws Exception {
     // "/encode(dir)/encode(segment)"
     String expectedPath = String.join(File.separator, tableName, segmentName);
     URI uri = createUri(URLEncoder.encode(tableName, "UTF-8"), URLEncoder.encode(segmentName, "UTF-8"));
@@ -76,12 +80,12 @@ public class AzurePinotFSUtilTest {
 
     // Using a URI constructor. In this case, we don't need to encode
     // /dir/segment
-    uri = new URI(uri.getScheme(), uri.getHost(), File.separator + String.join(File.separator, tableName, segmentName),
-        null);
+    uri = new URI(uri.getScheme(), uri.getHost(), File.separator + String.join(File.separator, tableName, segmentName), null);
     checkUri(uri, expectedPath, urlEncoded);
   }
 
-  private void checkUri(URI uri, String expectedPath, boolean urlEncoded) throws IOException {
+  private void checkUri(URI uri, String expectedPath, boolean urlEncoded)
+      throws IOException {
     if (urlEncoded) {
       Assert.assertEquals(AzurePinotFSUtil.convertUriToUrlEncodedAzureStylePath(uri), Utility.urlEncode(expectedPath));
     } else {
diff --git a/pinot-plugins/pinot-file-system/pinot-adls/src/test/resources/log4j2.xml b/pinot-plugins/pinot-file-system/pinot-adls/src/test/resources/log4j2.xml
index 4bbc67f..f0d887a 100644
--- a/pinot-plugins/pinot-file-system/pinot-adls/src/test/resources/log4j2.xml
+++ b/pinot-plugins/pinot-file-system/pinot-adls/src/test/resources/log4j2.xml
@@ -29,7 +29,7 @@
   </Appenders>
   <Loggers>
     <AsyncRoot level="warn" additivity="false">
-      <AppenderRef ref="console" />
+      <AppenderRef ref="console"/>
     </AsyncRoot>
   </Loggers>
 </Configuration>
diff --git a/pinot-plugins/pinot-file-system/pinot-gcs/pom.xml b/pinot-plugins/pinot-file-system/pinot-gcs/pom.xml
index 4b6117a..ffc5b00 100644
--- a/pinot-plugins/pinot-file-system/pinot-gcs/pom.xml
+++ b/pinot-plugins/pinot-file-system/pinot-gcs/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsPinotFS.java b/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsPinotFS.java
index df96eb9..f811f05 100644
--- a/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsPinotFS.java
+++ b/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsPinotFS.java
@@ -54,6 +54,7 @@ import static com.google.common.base.Preconditions.checkState;
 import static java.lang.String.format;
 import static org.apache.pinot.plugin.filesystem.GcsUri.createGcsUri;
 
+
 public class GcsPinotFS extends PinotFS {
   public static final String PROJECT_ID = "projectId";
   public static final String GCP_KEY = "gcpKey";
@@ -237,7 +238,8 @@ public class GcsPinotFS extends PinotFS {
     return _storage.get(gcsUri.getBucketName());
   }
 
-  private Blob getBlob(GcsUri gcsUri) throws IOException {
+  private Blob getBlob(GcsUri gcsUri)
+      throws IOException {
     try {
       return getBucket(gcsUri).get(gcsUri.getPath());
     } catch (StorageException e) {
@@ -249,7 +251,8 @@ public class GcsPinotFS extends PinotFS {
     return blob != null && blob.exists();
   }
 
-  private boolean existsFile(GcsUri gcsUri) throws IOException {
+  private boolean existsFile(GcsUri gcsUri)
+      throws IOException {
     return existsBlob(getBlob(gcsUri));
   }
 
@@ -278,16 +281,14 @@ public class GcsPinotFS extends PinotFS {
     try {
       // Return true if folder was not explicitly created but is a prefix of one or more files.
       // Use lazy iterable iterateAll() and verify that the iterator has elements.
-      return getBucket(gcsUri).list(Storage.BlobListOption.prefix(prefix))
-          .iterateAll()
-          .iterator()
-          .hasNext();
+      return getBucket(gcsUri).list(Storage.BlobListOption.prefix(prefix)).iterateAll().iterator().hasNext();
     } catch (Exception t) {
       throw new IOException(t);
     }
   }
 
-  private boolean isEmptyDirectory(GcsUri gcsUri) throws IOException {
+  private boolean isEmptyDirectory(GcsUri gcsUri)
+      throws IOException {
     if (!existsDirectory(gcsUri)) {
       return false;
     }
@@ -310,7 +311,8 @@ public class GcsPinotFS extends PinotFS {
     return isEmpty;
   }
 
-  private String[] listFiles(GcsUri fileUri, boolean recursive) throws IOException {
+  private String[] listFiles(GcsUri fileUri, boolean recursive)
+      throws IOException {
     try {
       ImmutableList.Builder<String> builder = ImmutableList.builder();
       String prefix = fileUri.getPrefix();
@@ -320,12 +322,11 @@ public class GcsPinotFS extends PinotFS {
       } else {
         page = _storage.list(fileUri.getBucketName(), Storage.BlobListOption.prefix(prefix), Storage.BlobListOption.currentDirectory());
       }
-      page.iterateAll()
-          .forEach(blob -> {
-            if (!blob.getName().equals(prefix)) {
-              builder.add(createGcsUri(fileUri.getBucketName(), blob.getName()).toString());
-            }
-          });
+      page.iterateAll().forEach(blob -> {
+        if (!blob.getName().equals(prefix)) {
+          builder.add(createGcsUri(fileUri.getBucketName(), blob.getName()).toString());
+        }
+      });
       String[] listedFiles = builder.build().toArray(new String[0]);
       LOGGER.info("Listed {} files from URI: {}, is recursive: {}", listedFiles.length, fileUri, recursive);
       return listedFiles;
@@ -334,7 +335,8 @@ public class GcsPinotFS extends PinotFS {
     }
   }
 
-  private boolean exists(GcsUri gcsUri) throws IOException {
+  private boolean exists(GcsUri gcsUri)
+      throws IOException {
     if (existsDirectory(gcsUri)) {
       return true;
     }
@@ -344,7 +346,8 @@ public class GcsPinotFS extends PinotFS {
     return existsFile(gcsUri);
   }
 
-  private boolean delete(GcsUri segmentUri, boolean forceDelete) throws IOException {
+  private boolean delete(GcsUri segmentUri, boolean forceDelete)
+      throws IOException {
     try {
       if (!exists(segmentUri)) {
         return forceDelete;
@@ -394,7 +397,8 @@ public class GcsPinotFS extends PinotFS {
     return deleteSucceeded;
   }
 
-  private boolean copyFile(GcsUri srcUri, GcsUri dstUri) throws IOException {
+  private boolean copyFile(GcsUri srcUri, GcsUri dstUri)
+      throws IOException {
     Blob blob = getBlob(srcUri);
     Blob newBlob = getBucket(dstUri).create(dstUri.getPath(), new byte[0]);
     CopyWriter copyWriter = blob.copyTo(newBlob.getBlobId());
@@ -402,8 +406,9 @@ public class GcsPinotFS extends PinotFS {
     return copyWriter.isDone() && blob.exists();
   }
 
-  private boolean copy(GcsUri srcUri, GcsUri dstUri) throws IOException {
-    if(!exists(srcUri)) {
+  private boolean copy(GcsUri srcUri, GcsUri dstUri)
+      throws IOException {
+    if (!exists(srcUri)) {
       throw new IOException(format("Source URI '%s' does not exist", srcUri));
     }
     if (srcUri.equals(dstUri)) {
diff --git a/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsUri.java b/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsUri.java
index 5d1fecd..7d89f2f 100644
--- a/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsUri.java
+++ b/pinot-plugins/pinot-file-system/pinot-gcs/src/main/java/org/apache/pinot/plugin/filesystem/GcsUri.java
@@ -47,10 +47,10 @@ public class GcsUri {
     checkState(!uri.isOpaque(), "URI cannot be opaque");
     // Use uri.getAuthority() instead of uri.getHost():
     // Bucket names can contain _'s: https://cloud.google.com/storage/docs/naming-buckets
-    this._uri = createUri(uri.getAuthority(), uri.getPath().replaceAll(DELIMITER + "+", DELIMITER));
-    this._path = memoize(this::calculatePath);
-    this._prefix = memoize(this::calculatePrefix);
-    this._absolutePath = memoize(this::calculateAbsolutePath);
+    _uri = createUri(uri.getAuthority(), uri.getPath().replaceAll(DELIMITER + "+", DELIMITER));
+    _path = memoize(this::calculatePath);
+    _prefix = memoize(this::calculatePrefix);
+    _absolutePath = memoize(this::calculateAbsolutePath);
   }
 
   public String getBucketName() {
@@ -158,7 +158,8 @@ public class GcsUri {
    */
   public String relativize(GcsUri subPath) {
     Path relativePath = _absolutePath.get().relativize(subPath._absolutePath.get());
-    checkState(!relativePath.isAbsolute() && !relativePath.startsWith(".."), "Path '%s' is not a subdirectory of '%s'", _absolutePath.get(), subPath._absolutePath.get());
+    checkState(!relativePath.isAbsolute() && !relativePath.startsWith(".."), "Path '%s' is not a subdirectory of '%s'", _absolutePath.get(),
+        subPath._absolutePath.get());
     return relativePath.toString();
   }
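
The relativize() guard reformatted above leans on java.nio.file.Path semantics: a path that escapes the base relativizes to something starting with "..". A tiny illustrative sketch, with made-up paths:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Illustrative only: why the "not absolute and not starting with .." guard works.
    public class RelativizeSketch {
      public static void main(String[] args) {
        Path base = Paths.get("/bucket/table");
        Path inside = Paths.get("/bucket/table/segment_0");
        Path outside = Paths.get("/bucket/other");

        Path ok = base.relativize(inside);    // "segment_0": a real subpath
        Path bad = base.relativize(outside);  // "../other": escapes the base

        System.out.println(ok);                    // segment_0
        System.out.println(bad.startsWith(".."));  // true, so the checkState above would fail
      }
    }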
 
diff --git a/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsPinotFS.java b/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsPinotFS.java
index 1c1113e..fd2795e 100644
--- a/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsPinotFS.java
+++ b/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsPinotFS.java
@@ -52,6 +52,7 @@ import static org.apache.pinot.plugin.filesystem.GcsUri.createGcsUri;
 import static org.testng.Assert.assertEquals;
 import static org.testng.Assert.assertTrue;
 
+
 /**
  * Integration test for GcsPinotFS
  *
@@ -69,170 +70,168 @@ import static org.testng.Assert.assertTrue;
  */
 @Test(singleThreaded = true)
 public class TestGcsPinotFS {
-    private static final String DATA_DIR_PREFIX = "testing-data";
-
-    private GcsPinotFS _pinotFS;
-    private GcsUri _dataDir;
-    private final Closer _closer = Closer.create();
-
-    @BeforeClass
-    public void setup() {
-        String keyFile = System.getenv("GOOGLE_APPLICATION_CREDENTIALS");
-        String projectId = System.getenv("GCP_PROJECT");
-        String bucket = System.getenv("GCS_BUCKET");
-        if (keyFile != null && projectId != null && bucket != null) {
-            _pinotFS = new GcsPinotFS();
-            _pinotFS.init(new PinotConfiguration(ImmutableMap.<String, Object> builder()
-                    .put(PROJECT_ID,projectId)
-                    .put(GCP_KEY, keyFile)
-                    .build()));
-            _dataDir = createGcsUri(bucket, DATA_DIR_PREFIX + randomUUID());
-        }
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        if (_pinotFS != null) {
-            _pinotFS.delete(_dataDir.getUri(), true);
-            _closer.close();
-        }
-    }
-
-    private void skipIfNotConfigured() {
-        if (_pinotFS == null) {
-            throw new SkipException("No google credentials supplied.");
-        }
+  private static final String DATA_DIR_PREFIX = "testing-data";
+
+  private GcsPinotFS _pinotFS;
+  private GcsUri _dataDir;
+  private final Closer _closer = Closer.create();
+
+  @BeforeClass
+  public void setup() {
+    String keyFile = System.getenv("GOOGLE_APPLICATION_CREDENTIALS");
+    String projectId = System.getenv("GCP_PROJECT");
+    String bucket = System.getenv("GCS_BUCKET");
+    if (keyFile != null && projectId != null && bucket != null) {
+      _pinotFS = new GcsPinotFS();
+      _pinotFS.init(new PinotConfiguration(ImmutableMap.<String, Object>builder().put(PROJECT_ID, projectId).put(GCP_KEY, keyFile).build()));
+      _dataDir = createGcsUri(bucket, DATA_DIR_PREFIX + randomUUID());
     }
-
-    private Path createLocalTempDirectory() throws IOException {
-        Path temporaryDirectory = Files.createDirectory(Paths.get("/tmp/" + DATA_DIR_PREFIX + "-" + randomUUID()));
-        _closer.register(() -> deleteDirectory(temporaryDirectory.toFile()));
-        return temporaryDirectory;
-    }
-
-    private GcsUri createTempDirectoryGcsUri() {
-        return _dataDir.resolve("dir-" + randomUUID());
+  }
+
+  @AfterClass
+  public void tearDown()
+      throws Exception {
+    if (_pinotFS != null) {
+      _pinotFS.delete(_dataDir.getUri(), true);
+      _closer.close();
     }
+  }
 
-    /**
-     * Resolved gcs uri does not contain trailing delimiter, e.g. "/",
-     * as the GcsUri.resolve() method uses Path.resolve() semantics.
-     *
-     * @param gcsUri
-     * @return path with trailing delimiter
-     */
-    private static GcsUri appendSlash(GcsUri gcsUri) {
-        return createGcsUri(gcsUri.getBucketName(), gcsUri.getPrefix());
+  private void skipIfNotConfigured() {
+    if (_pinotFS == null) {
+      throw new SkipException("No google credentials supplied.");
     }
-
-    private List<String> writeToFile(Path file, int count) {
-        List<String> lines = IntStream.range(0, count)
-                .mapToObj(n -> "line " + n)
-                .collect(toList());
-
-        try (BufferedWriter writer = Files.newBufferedWriter(file, UTF_8)) {
-            lines.forEach(line -> {
-                try {
-                    writer.write(line);
-                    writer.newLine();
-                } catch (IOException e) {
-                    throw new UncheckedIOException(e);
-                }
-            });
+  }
+
+  private Path createLocalTempDirectory()
+      throws IOException {
+    Path temporaryDirectory = Files.createDirectory(Paths.get("/tmp/" + DATA_DIR_PREFIX + "-" + randomUUID()));
+    _closer.register(() -> deleteDirectory(temporaryDirectory.toFile()));
+    return temporaryDirectory;
+  }
+
+  private GcsUri createTempDirectoryGcsUri() {
+    return _dataDir.resolve("dir-" + randomUUID());
+  }
+
+  /**
+   * Resolved gcs uri does not contain trailing delimiter, e.g. "/",
+   * as the GcsUri.resolve() method uses Path.resolve() semantics.
+   *
+   * @param gcsUri
+   * @return path with trailing delimiter
+   */
+  private static GcsUri appendSlash(GcsUri gcsUri) {
+    return createGcsUri(gcsUri.getBucketName(), gcsUri.getPrefix());
+  }
+
+  private List<String> writeToFile(Path file, int count) {
+    List<String> lines = IntStream.range(0, count).mapToObj(n -> "line " + n).collect(toList());
+
+    try (BufferedWriter writer = Files.newBufferedWriter(file, UTF_8)) {
+      lines.forEach(line -> {
+        try {
+          writer.write(line);
+          writer.newLine();
         } catch (IOException e) {
-            throw new UncheckedIOException(e);
+          throw new UncheckedIOException(e);
         }
-        return lines;
+      });
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
     }
-
-    private Stream<GcsUri> listFilesToStream(GcsUri gcsUri) throws IOException {
-        return Arrays.asList(_pinotFS.listFiles(gcsUri.getUri(), true)).stream()
-                .map(URI::create)
-                .map(GcsUri::new);
-    }
-
-    @Test
-    public void testGcs() throws Exception {
-        skipIfNotConfigured();
-        // Create empty file
-        Path localTmpDir = createLocalTempDirectory();
-        Path emptyFile = localTmpDir.resolve("empty");
-        emptyFile.toFile().createNewFile();
-
-        // Create non-empty file
-        Path file1 = localTmpDir.resolve("file1");
-        List<String> expectedLinesFromFile =writeToFile(file1, 10);
-        List<String> actualLinesFromFile = Files.readAllLines(file1, UTF_8);
-        // Sanity check
-        assertEquals(actualLinesFromFile, expectedLinesFromFile);
-
-        // Gcs Temporary Directory
-        GcsUri gcsDirectoryUri = createTempDirectoryGcsUri();
-        Set<GcsUri> expectedElements = new HashSet<>();
-        expectedElements.add(appendSlash(gcsDirectoryUri));
-
-        // Test mkdir()
-        // Create the temp directory, which also creates any missing parent paths
-        _pinotFS.mkdir(gcsDirectoryUri.getUri());
-
-        GcsUri emptyFileGcsUri = gcsDirectoryUri.resolve("empty");
-        expectedElements.add(emptyFileGcsUri);
-
-        // Copy the empty file
-        _pinotFS.copyFromLocalFile(emptyFile.toFile(), emptyFileGcsUri.getUri());
-        expectedElements.add(appendSlash(emptyFileGcsUri));
-
-        // Test making a subdirectory with the same name as an object.
-        // This is allowed in gcs
-        _pinotFS.mkdir(emptyFileGcsUri.getUri());
-
-        GcsUri nonEmptyFileGcsUri = gcsDirectoryUri.resolve("empty/file1");
-        expectedElements.add(nonEmptyFileGcsUri);
-        // Copy the non empty file to the new folder
-        _pinotFS.copyFromLocalFile(file1.toFile(), nonEmptyFileGcsUri.getUri());
-
-        // Test listFiles()
-        // Check that all the files are there
-        assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElements);
-        // Check that the non-empty file has the expected contents
-        Path nonEmptyFileFromGcs = localTmpDir.resolve("nonEmptyFileFromGcs");
-        _pinotFS.copyToLocalFile(nonEmptyFileGcsUri.getUri(), nonEmptyFileFromGcs.toFile());
-        assertEquals(Files.readAllLines(nonEmptyFileFromGcs), expectedLinesFromFile);
-
-        // Test gcs copy single file to file
-        GcsUri nonEmptyFileGcsUriCopy = gcsDirectoryUri.resolve("empty/file2");
-        _pinotFS.copy(nonEmptyFileGcsUri.getUri(), nonEmptyFileGcsUriCopy.getUri());
-        assertTrue(listFilesToStream(gcsDirectoryUri).anyMatch(uri -> uri.equals(nonEmptyFileGcsUriCopy)), format("Cannot find file '%s'", nonEmptyFileGcsUriCopy));
-
-        // Test gcs delete single file
-        _pinotFS.delete(nonEmptyFileGcsUriCopy.getUri(), false);
-        assertTrue(listFilesToStream(gcsDirectoryUri).allMatch(uri -> !uri.equals(nonEmptyFileGcsUriCopy)), format("Unexpected: found file '%s'", nonEmptyFileGcsUriCopy));
-
-        // Test copy directory -> directory
-        GcsUri gcsDirectoryUriCopy = createTempDirectoryGcsUri();
-        _pinotFS.copy(gcsDirectoryUri.getUri(), gcsDirectoryUriCopy.getUri());
-
-        Set<GcsUri> expectedElementsCopy = new HashSet<>();
-        String directoryName = Paths.get(gcsDirectoryUri.getPath()).getFileName().toString();
-        String directoryCopyName = Paths.get(gcsDirectoryUriCopy.getPath()).getFileName().toString();
-        for (GcsUri element : ImmutableList.copyOf(expectedElements)) {
-            expectedElementsCopy.add(createGcsUri(element.getBucketName(), element.getPath().replace(directoryName, directoryCopyName)));
-        }
-        expectedElementsCopy.addAll(expectedElements);
-        assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElementsCopy);
-        // Test delete directory
-        _pinotFS.delete(gcsDirectoryUriCopy.getUri(), true);
-        assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElements);
-
-        // Test move directory
-        _pinotFS.move(gcsDirectoryUri.getUri(), gcsDirectoryUriCopy.getUri(), true);
-        expectedElementsCopy.removeAll(expectedElements);
-        assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElementsCopy);
-
-        // Test move file to different directory
-        GcsUri movedFileGcsUri = gcsDirectoryUriCopy.resolve("empty/file1");
-        assertTrue(listFilesToStream(gcsDirectoryUri).allMatch(uri -> !uri.equals(nonEmptyFileGcsUri)));
-        _pinotFS.move(movedFileGcsUri.getUri(), nonEmptyFileGcsUri.getUri(), false);
-        assertTrue(listFilesToStream(gcsDirectoryUri).anyMatch(uri -> uri.equals(nonEmptyFileGcsUri)));
+    return lines;
+  }
+
+  private Stream<GcsUri> listFilesToStream(GcsUri gcsUri)
+      throws IOException {
+    return Arrays.asList(_pinotFS.listFiles(gcsUri.getUri(), true)).stream().map(URI::create).map(GcsUri::new);
+  }
+
+  @Test
+  public void testGcs()
+      throws Exception {
+    skipIfNotConfigured();
+    // Create empty file
+    Path localTmpDir = createLocalTempDirectory();
+    Path emptyFile = localTmpDir.resolve("empty");
+    emptyFile.toFile().createNewFile();
+
+    // Create non-empty file
+    Path file1 = localTmpDir.resolve("file1");
+    List<String> expectedLinesFromFile = writeToFile(file1, 10);
+    List<String> actualLinesFromFile = Files.readAllLines(file1, UTF_8);
+    // Sanity check
+    assertEquals(actualLinesFromFile, expectedLinesFromFile);
+
+    // Gcs Temporary Directory
+    GcsUri gcsDirectoryUri = createTempDirectoryGcsUri();
+    Set<GcsUri> expectedElements = new HashSet<>();
+    expectedElements.add(appendSlash(gcsDirectoryUri));
+
+    // Test mkdir()
+    // Create the temp directory, which also creates any missing parent paths
+    _pinotFS.mkdir(gcsDirectoryUri.getUri());
+
+    GcsUri emptyFileGcsUri = gcsDirectoryUri.resolve("empty");
+    expectedElements.add(emptyFileGcsUri);
+
+    // Copy the empty file
+    _pinotFS.copyFromLocalFile(emptyFile.toFile(), emptyFileGcsUri.getUri());
+    expectedElements.add(appendSlash(emptyFileGcsUri));
+
+    // Test making a subdirectory with the same name as an object.
+    // This is allowed in gcs
+    _pinotFS.mkdir(emptyFileGcsUri.getUri());
+
+    GcsUri nonEmptyFileGcsUri = gcsDirectoryUri.resolve("empty/file1");
+    expectedElements.add(nonEmptyFileGcsUri);
+    // Copy the non empty file to the new folder
+    _pinotFS.copyFromLocalFile(file1.toFile(), nonEmptyFileGcsUri.getUri());
+
+    // Test listFiles()
+    // Check that all the files are there
+    assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElements);
+    // Check that the non-empty file has the expected contents
+    Path nonEmptyFileFromGcs = localTmpDir.resolve("nonEmptyFileFromGcs");
+    _pinotFS.copyToLocalFile(nonEmptyFileGcsUri.getUri(), nonEmptyFileFromGcs.toFile());
+    assertEquals(Files.readAllLines(nonEmptyFileFromGcs), expectedLinesFromFile);
+
+    // Test gcs copy single file to file
+    GcsUri nonEmptyFileGcsUriCopy = gcsDirectoryUri.resolve("empty/file2");
+    _pinotFS.copy(nonEmptyFileGcsUri.getUri(), nonEmptyFileGcsUriCopy.getUri());
+    assertTrue(listFilesToStream(gcsDirectoryUri).anyMatch(uri -> uri.equals(nonEmptyFileGcsUriCopy)), format("Cannot find file '%s'", nonEmptyFileGcsUriCopy));
+
+    // Test gcs delete single file
+    _pinotFS.delete(nonEmptyFileGcsUriCopy.getUri(), false);
+    assertTrue(listFilesToStream(gcsDirectoryUri).allMatch(uri -> !uri.equals(nonEmptyFileGcsUriCopy)),
+        format("Unexpected: found file '%s'", nonEmptyFileGcsUriCopy));
+
+    // Test copy directory -> directory
+    GcsUri gcsDirectoryUriCopy = createTempDirectoryGcsUri();
+    _pinotFS.copy(gcsDirectoryUri.getUri(), gcsDirectoryUriCopy.getUri());
+
+    Set<GcsUri> expectedElementsCopy = new HashSet<>();
+    String directoryName = Paths.get(gcsDirectoryUri.getPath()).getFileName().toString();
+    String directoryCopyName = Paths.get(gcsDirectoryUriCopy.getPath()).getFileName().toString();
+    for (GcsUri element : ImmutableList.copyOf(expectedElements)) {
+      expectedElementsCopy.add(createGcsUri(element.getBucketName(), element.getPath().replace(directoryName, directoryCopyName)));
     }
+    expectedElementsCopy.addAll(expectedElements);
+    assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElementsCopy);
+    // Test delete directory
+    _pinotFS.delete(gcsDirectoryUriCopy.getUri(), true);
+    assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElements);
+
+    // Test move directory
+    _pinotFS.move(gcsDirectoryUri.getUri(), gcsDirectoryUriCopy.getUri(), true);
+    expectedElementsCopy.removeAll(expectedElements);
+    assertEquals(listFilesToStream(_dataDir).collect(toSet()), expectedElementsCopy);
+
+    // Test move file to different directory
+    GcsUri movedFileGcsUri = gcsDirectoryUriCopy.resolve("empty/file1");
+    assertTrue(listFilesToStream(gcsDirectoryUri).allMatch(uri -> !uri.equals(nonEmptyFileGcsUri)));
+    _pinotFS.move(movedFileGcsUri.getUri(), nonEmptyFileGcsUri.getUri(), false);
+    assertTrue(listFilesToStream(gcsDirectoryUri).anyMatch(uri -> uri.equals(nonEmptyFileGcsUri)));
+  }
 }
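
The reformatted TestGcsPinotFS above only exercises GCS when GOOGLE_APPLICATION_CREDENTIALS, GCP_PROJECT and GCS_BUCKET are all set; otherwise skipIfNotConfigured() throws SkipException. Below is a minimal sketch of the same GcsPinotFS bootstrap outside the TestNG harness; PROJECT_ID and GCP_KEY are the constants the test imports statically (their declaring class is not shown in this hunk, so the static imports here are an assumption), and everything else mirrors setup()/tearDown().

    import com.google.common.collect.ImmutableMap;
    import org.apache.pinot.plugin.filesystem.GcsPinotFS;
    import org.apache.pinot.plugin.filesystem.GcsUri;
    import org.apache.pinot.spi.env.PinotConfiguration;

    import static java.util.UUID.randomUUID;
    // Assumed location of the config-key constants used in the test.
    import static org.apache.pinot.plugin.filesystem.GcsPinotFS.GCP_KEY;
    import static org.apache.pinot.plugin.filesystem.GcsPinotFS.PROJECT_ID;
    import static org.apache.pinot.plugin.filesystem.GcsUri.createGcsUri;

    public final class GcsSmokeCheck {
      private GcsSmokeCheck() {
      }

      public static void main(String[] args)
          throws Exception {
        // Same environment variables the test reads in setup().
        String keyFile = System.getenv("GOOGLE_APPLICATION_CREDENTIALS");
        String projectId = System.getenv("GCP_PROJECT");
        String bucket = System.getenv("GCS_BUCKET");

        GcsPinotFS pinotFS = new GcsPinotFS();
        pinotFS.init(new PinotConfiguration(
            ImmutableMap.<String, Object>builder().put(PROJECT_ID, projectId).put(GCP_KEY, keyFile).build()));

        // Work under a unique prefix so the recursive delete below cannot touch other data.
        GcsUri dataDir = createGcsUri(bucket, "testing-data" + randomUUID());
        pinotFS.mkdir(dataDir.getUri());
        pinotFS.delete(dataDir.getUri(), true);
      }
    }
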
diff --git a/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsUri.java b/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsUri.java
index efb7901..b05f34f 100644
--- a/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsUri.java
+++ b/pinot-plugins/pinot-file-system/pinot-gcs/src/test/java/org/apache/pinot/plugin/filesystem/TestGcsUri.java
@@ -26,46 +26,47 @@ import static org.apache.pinot.plugin.filesystem.GcsUri.createGcsUri;
 import static org.testng.Assert.assertEquals;
 import static org.testng.Assert.assertThrows;
 
+
 public class TestGcsUri {
-    @Test
-    public void testDifferentScheme() {
-        URI uri = URI.create("file://bucket/file");
-        GcsUri gcsUri = new GcsUri(uri);
-        assertEquals(gcsUri.getUri().getScheme(), SCHEME);
-    }
+  @Test
+  public void testDifferentScheme() {
+    URI uri = URI.create("file://bucket/file");
+    GcsUri gcsUri = new GcsUri(uri);
+    assertEquals(gcsUri.getUri().getScheme(), SCHEME);
+  }
 
-    @Test
-    public void testNonAbsolutePath() {
-        // Relative path must be normalized to absolute path for gcs uri
-        // This is because the URI must have an absolute path component,
-        // ex. new URI("gs", "bucket",
-        GcsUri gcsUri = createGcsUri("bucket", "dir/file");
-        assertEquals(gcsUri, createGcsUri("bucket", "/dir/file"));
-    }
+  @Test
+  public void testNonAbsolutePath() {
+    // Relative path must be normalized to absolute path for gcs uri
+    // This is because the URI must have an absolute path component,
+    // ex. new URI("gs", "bucket",
+    GcsUri gcsUri = createGcsUri("bucket", "dir/file");
+    assertEquals(gcsUri, createGcsUri("bucket", "/dir/file"));
+  }
 
-    @Test
-    public void testUnderScoreBucketName() {
-        // This is why getAuthority is used instead of getHostName()
-        // see https://cloud.google.com/storage/docs/naming-buckets
-        // gcs allows _'s which would cause URI.getHost() to be null:
-        GcsUri gcsUri = new GcsUri(URI.create("gs://bucket_name/dir"));
-        assertEquals(gcsUri.getBucketName(), "bucket_name");
-    }
+  @Test
+  public void testUnderScoreBucketName() {
+    // This is why getAuthority is used instead of getHostName()
+    // see https://cloud.google.com/storage/docs/naming-buckets
+    // gcs allows _'s which would cause URI.getHost() to be null:
+    GcsUri gcsUri = new GcsUri(URI.create("gs://bucket_name/dir"));
+    assertEquals(gcsUri.getBucketName(), "bucket_name");
+  }
 
-    @Test
-    public void testRelativize() {
-        GcsUri gcsUri = new GcsUri(URI.create("gs://bucket_name/dir"));
-        GcsUri subDir = new GcsUri(URI.create("gs://bucket_name/dir/subdir/file"));
-        assertEquals(gcsUri.relativize(subDir), "subdir/file");
+  @Test
+  public void testRelativize() {
+    GcsUri gcsUri = new GcsUri(URI.create("gs://bucket_name/dir"));
+    GcsUri subDir = new GcsUri(URI.create("gs://bucket_name/dir/subdir/file"));
+    assertEquals(gcsUri.relativize(subDir), "subdir/file");
 
-        GcsUri nonRelativeGcsUri = new GcsUri(URI.create("gs://bucket_name/other/subdir/file"));
-        assertThrows(IllegalStateException.class, () -> gcsUri.relativize(nonRelativeGcsUri));
-    }
+    GcsUri nonRelativeGcsUri = new GcsUri(URI.create("gs://bucket_name/other/subdir/file"));
+    assertThrows(IllegalStateException.class, () -> gcsUri.relativize(nonRelativeGcsUri));
+  }
 
-    @Test
-    public void testResolve() {
-        GcsUri gcsUri = new GcsUri(URI.create("gs://bucket_name/dir"));
-        GcsUri subDir = gcsUri.resolve("subdir/file");
-        assertEquals(new GcsUri(URI.create("gs://bucket_name/dir/subdir/file")), subDir);
-    }
+  @Test
+  public void testResolve() {
+    GcsUri gcsUri = new GcsUri(URI.create("gs://bucket_name/dir"));
+    GcsUri subDir = gcsUri.resolve("subdir/file");
+    assertEquals(new GcsUri(URI.create("gs://bucket_name/dir/subdir/file")), subDir);
+  }
 }
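
testUnderScoreBucketName above documents why GcsUri reads the bucket from URI.getAuthority() instead of URI.getHost(): GCS allows underscores in bucket names, and an underscore makes the authority an invalid hostname, so java.net.URI reports no host at all. A self-contained sketch of that JDK behaviour, with no Pinot classes involved:

    import java.net.URI;

    public final class UnderscoreBucketDemo {
      private UnderscoreBucketDemo() {
      }

      public static void main(String[] args) {
        URI uri = URI.create("gs://bucket_name/dir");
        // The underscore prevents server-based authority parsing, so there is no host...
        System.out.println(uri.getHost());      // null
        // ...but the raw authority still carries the bucket name.
        System.out.println(uri.getAuthority()); // bucket_name
      }
    }
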
diff --git a/pinot-plugins/pinot-file-system/pinot-hdfs/pom.xml b/pinot-plugins/pinot-file-system/pinot-hdfs/pom.xml
index 3b839ce..8053618 100644
--- a/pinot-plugins/pinot-file-system/pinot-hdfs/pom.xml
+++ b/pinot-plugins/pinot-file-system/pinot-hdfs/pom.xml
@@ -34,9 +34,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java b/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java
index 06cd7bb..4a7a5bb 100644
--- a/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java
+++ b/pinot-plugins/pinot-file-system/pinot-hdfs/src/main/java/org/apache/pinot/plugin/filesystem/HadoopPinotFS.java
@@ -39,6 +39,7 @@ import org.apache.pinot.spi.filesystem.PinotFS;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+
 /**
  * Implementation of PinotFS for the Hadoop Filesystem
  */
@@ -108,8 +109,7 @@ public class HadoopPinotFS extends PinotFS {
         Path sourceFilePath = sourceFile.getPath();
         if (sourceFile.isFile()) {
           try {
-            FileUtil.copy(_hadoopFS, sourceFilePath, _hadoopFS, new Path(target, sourceFilePath.getName()), false,
-                _hadoopConf);
+            FileUtil.copy(_hadoopFS, sourceFilePath, _hadoopFS, new Path(target, sourceFilePath.getName()), false, _hadoopConf);
           } catch (FileNotFoundException e) {
             LOGGER.warn("Not found file {}, skipping copying it...", sourceFilePath, e);
           }
@@ -184,8 +184,7 @@ public class HadoopPinotFS extends PinotFS {
       }
       long startMs = System.currentTimeMillis();
       _hadoopFS.copyToLocalFile(remoteFile, localFile);
-      LOGGER.debug("copied {} from hdfs to {} in local for size {}, take {} ms", srcUri, dstFilePath, dstFile.length(),
-          System.currentTimeMillis() - startMs);
+      LOGGER.debug("copied {} from hdfs to {} in local for size {}, take {} ms", srcUri, dstFilePath, dstFile.length(), System.currentTimeMillis() - startMs);
     } catch (IOException e) {
       LOGGER.warn("failed to fetch segment {} from hdfs to {}, might retry", srcUri, dstFile, e);
       throw e;
@@ -238,22 +237,19 @@ public class HadoopPinotFS extends PinotFS {
     return _hadoopFS.open(path);
   }
 
-  private void authenticate(Configuration hadoopConf,
-      PinotConfiguration configs) {
+  private void authenticate(Configuration hadoopConf, PinotConfiguration configs) {
     String principal = configs.getProperty(PRINCIPAL);
     String keytab = configs.getProperty(KEYTAB);
     if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
       UserGroupInformation.setConfiguration(hadoopConf);
       if (UserGroupInformation.isSecurityEnabled()) {
         try {
-          if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials() || !UserGroupInformation.getCurrentUser()
-              .getUserName().equals(principal)) {
+          if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials() || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
             LOGGER.info("Trying to authenticate user {} with keytab {}..", principal, keytab);
             UserGroupInformation.loginUserFromKeytab(principal, keytab);
           }
         } catch (IOException e) {
-          throw new RuntimeException(
-              String.format("Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab), e);
+          throw new RuntimeException(String.format("Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab), e);
         }
       }
     }
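
The reflowed authenticate() method in HadoopPinotFS only attempts a Kerberos login when both a principal and a keytab are configured and Hadoop security is enabled, and it skips the login if the current user already holds credentials for that principal. A minimal sketch of the same UserGroupInformation flow in isolation; the principal and keytab arguments here are plain placeholders, not Pinot config keys:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class KerberosLoginSketch {
      private KerberosLoginSketch() {
      }

      public static void login(Configuration hadoopConf, String principal, String keytab)
          throws IOException {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (!UserGroupInformation.isSecurityEnabled()) {
          // hadoop.security.authentication is not "kerberos"; nothing to do.
          return;
        }
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        // Re-login only when there is no ticket yet or a different principal is logged in.
        if (!currentUser.hasKerberosCredentials() || !currentUser.getUserName().equals(principal)) {
          UserGroupInformation.loginUserFromKeytab(principal, keytab);
        }
      }
    }
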
diff --git a/pinot-plugins/pinot-file-system/pinot-s3/pom.xml b/pinot-plugins/pinot-file-system/pinot-s3/pom.xml
index 302eda3..f0857aa 100644
--- a/pinot-plugins/pinot-file-system/pinot-s3/pom.xml
+++ b/pinot-plugins/pinot-file-system/pinot-s3/pom.xml
@@ -42,9 +42,6 @@
     <s3mock.version>2.1.19</s3mock.version>
     <javax.version>3.1.0</javax.version>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencyManagement>
diff --git a/pinot-plugins/pinot-file-system/pinot-s3/src/main/java/org/apache/pinot/plugin/filesystem/S3PinotFS.java b/pinot-plugins/pinot-file-system/pinot-s3/src/main/java/org/apache/pinot/plugin/filesystem/S3PinotFS.java
index 55873ec..4495363 100644
--- a/pinot-plugins/pinot-file-system/pinot-s3/src/main/java/org/apache/pinot/plugin/filesystem/S3PinotFS.java
+++ b/pinot-plugins/pinot-file-system/pinot-s3/src/main/java/org/apache/pinot/plugin/filesystem/S3PinotFS.java
@@ -101,15 +101,14 @@ public class S3PinotFS extends PinotFS {
         _serverSideEncryption = ServerSideEncryption.valueOf(serverSideEncryption);
       } catch (Exception e) {
         throw new UnsupportedOperationException(String
-            .format("Unknown value '%s' for S3PinotFS config: 'serverSideEncryption'. Supported values are: %s",
-                serverSideEncryption, Arrays.toString(ServerSideEncryption.knownValues().toArray())));
+            .format("Unknown value '%s' for S3PinotFS config: 'serverSideEncryption'. Supported values are: %s", serverSideEncryption,
+                Arrays.toString(ServerSideEncryption.knownValues().toArray())));
       }
       switch (_serverSideEncryption) {
         case AWS_KMS:
           _ssekmsKeyId = config.getProperty(SSE_KMS_KEY_ID_CONFIG_KEY);
           if (_ssekmsKeyId == null) {
-            throw new UnsupportedOperationException(
-                "Missing required config: 'sseKmsKeyId' when AWS_KMS is used for server side encryption");
+            throw new UnsupportedOperationException("Missing required config: 'sseKmsKeyId' when AWS_KMS is used for server side encryption");
           }
           _ssekmsEncryptionContext = config.getProperty(SSE_KMS_ENCRYPTION_CONTEXT_CONFIG_KEY);
           break;
@@ -131,8 +130,7 @@ public class S3PinotFS extends PinotFS {
         awsCredentialsProvider = DefaultCredentialsProvider.create();
       }
 
-      S3ClientBuilder s3ClientBuilder =
-          S3Client.builder().region(Region.of(region)).credentialsProvider(awsCredentialsProvider);
+      S3ClientBuilder s3ClientBuilder = S3Client.builder().region(Region.of(region)).credentialsProvider(awsCredentialsProvider);
       if (!isNullOrEmpty(config.getProperty(ENDPOINT))) {
         String endpoint = config.getProperty(ENDPOINT);
         try {
@@ -316,14 +314,11 @@ public class S3PinotFS extends PinotFS {
     try {
       if (isDirectory(segmentUri)) {
         if (!forceDelete) {
-          Preconditions
-              .checkState(isEmptyDirectory(segmentUri), "ForceDelete flag is not set and directory '%s' is not empty",
-                  segmentUri);
+          Preconditions.checkState(isEmptyDirectory(segmentUri), "ForceDelete flag is not set and directory '%s' is not empty", segmentUri);
         }
         String prefix = normalizeToDirectoryPrefix(segmentUri);
         ListObjectsV2Response listObjectsV2Response;
-        ListObjectsV2Request.Builder listObjectsV2RequestBuilder =
-            ListObjectsV2Request.builder().bucket(segmentUri.getHost());
+        ListObjectsV2Request.Builder listObjectsV2RequestBuilder = ListObjectsV2Request.builder().bucket(segmentUri.getHost());
 
         if (prefix.equals(DELIMITER)) {
           ListObjectsV2Request listObjectsV2Request = listObjectsV2RequestBuilder.build();
@@ -334,8 +329,7 @@ public class S3PinotFS extends PinotFS {
         }
         boolean deleteSucceeded = true;
         for (S3Object s3Object : listObjectsV2Response.contents()) {
-          DeleteObjectRequest deleteObjectRequest =
-              DeleteObjectRequest.builder().bucket(segmentUri.getHost()).key(s3Object.key()).build();
+          DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder().bucket(segmentUri.getHost()).key(s3Object.key()).build();
 
           DeleteObjectResponse deleteObjectResponse = _s3Client.deleteObject(deleteObjectRequest);
 
@@ -344,8 +338,7 @@ public class S3PinotFS extends PinotFS {
         return deleteSucceeded;
       } else {
         String prefix = sanitizePath(segmentUri.getPath());
-        DeleteObjectRequest deleteObjectRequest =
-            DeleteObjectRequest.builder().bucket(segmentUri.getHost()).key(prefix).build();
+        DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder().bucket(segmentUri.getHost()).key(prefix).build();
 
         DeleteObjectResponse deleteObjectResponse = _s3Client.deleteObject(deleteObjectRequest);
 
@@ -441,8 +434,7 @@ public class S3PinotFS extends PinotFS {
       boolean isDone = false;
       String prefix = normalizeToDirectoryPrefix(fileUri);
       while (!isDone) {
-        ListObjectsV2Request.Builder listObjectsV2RequestBuilder =
-            ListObjectsV2Request.builder().bucket(fileUri.getHost());
+        ListObjectsV2Request.Builder listObjectsV2RequestBuilder = ListObjectsV2Request.builder().bucket(fileUri.getHost());
         if (!prefix.equals(DELIMITER)) {
           listObjectsV2RequestBuilder = listObjectsV2RequestBuilder.prefix(prefix);
         }
@@ -509,8 +501,7 @@ public class S3PinotFS extends PinotFS {
         return true;
       }
 
-      ListObjectsV2Request listObjectsV2Request =
-          ListObjectsV2Request.builder().bucket(uri.getHost()).prefix(prefix).maxKeys(2).build();
+      ListObjectsV2Request listObjectsV2Request = ListObjectsV2Request.builder().bucket(uri.getHost()).prefix(prefix).maxKeys(2).build();
       ListObjectsV2Response listObjectsV2Response = _s3Client.listObjectsV2(listObjectsV2Request);
       return listObjectsV2Response.hasContents();
     } catch (NoSuchKeyException e) {
@@ -538,8 +529,7 @@ public class S3PinotFS extends PinotFS {
       }
 
       String path = sanitizePath(uri.getPath());
-      CopyObjectRequest request = generateCopyObjectRequest(encodedUrl, uri, path,
-          ImmutableMap.of("lastModified", String.valueOf(System.currentTimeMillis())));
+      CopyObjectRequest request = generateCopyObjectRequest(encodedUrl, uri, path, ImmutableMap.of("lastModified", String.valueOf(System.currentTimeMillis())));
       _s3Client.copyObject(request);
       long newUpdateTime = getS3ObjectMetadata(uri).lastModified().toEpochMilli();
       return newUpdateTime > s3ObjectMetadata.lastModified().toEpochMilli();
@@ -569,10 +559,8 @@ public class S3PinotFS extends PinotFS {
     return putReqBuilder.build();
   }
 
-  private CopyObjectRequest generateCopyObjectRequest(String copySource, URI dest, String path,
-      Map<String, String> metadata) {
-    CopyObjectRequest.Builder copyReqBuilder =
-        CopyObjectRequest.builder().copySource(copySource).destinationBucket(dest.getHost()).destinationKey(path);
+  private CopyObjectRequest generateCopyObjectRequest(String copySource, URI dest, String path, Map<String, String> metadata) {
+    CopyObjectRequest.Builder copyReqBuilder = CopyObjectRequest.builder().copySource(copySource).destinationBucket(dest.getHost()).destinationKey(path);
     if (metadata != null) {
       copyReqBuilder.metadata(metadata).metadataDirective(MetadataDirective.REPLACE);
     }
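
Most of the reflowed lines in S3PinotFS are AWS SDK v2 builder chains: S3Client, ListObjectsV2Request, DeleteObjectRequest and CopyObjectRequest are all built fluently, and the long chains now sit on single lines. A minimal sketch of the listing pattern the class is built around; the bucket name, prefix and region are placeholders, and the pagination loop is a simplified stand-in for the while (!isDone) loop shown above:

    import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
    import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
    import software.amazon.awssdk.services.s3.model.S3Object;

    public final class S3ListingSketch {
      private S3ListingSketch() {
      }

      public static void main(String[] args) {
        // Same builder chain S3PinotFS uses when no explicit access/secret key is configured.
        S3Client s3Client = S3Client.builder()
            .region(Region.US_EAST_1) // placeholder region
            .credentialsProvider(DefaultCredentialsProvider.create())
            .build();

        // listFiles()-style scan: page through keys under a prefix via continuation tokens.
        String continuationToken = null;
        do {
          ListObjectsV2Request.Builder requestBuilder =
              ListObjectsV2Request.builder().bucket("my-bucket").prefix("my/prefix/");
          if (continuationToken != null) {
            requestBuilder.continuationToken(continuationToken);
          }
          ListObjectsV2Response response = s3Client.listObjectsV2(requestBuilder.build());
          for (S3Object s3Object : response.contents()) {
            System.out.println(s3Object.key());
          }
          continuationToken = Boolean.TRUE.equals(response.isTruncated()) ? response.nextContinuationToken() : null;
        } while (continuationToken != null);
      }
    }
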
diff --git a/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3PinotFSTest.java b/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3PinotFSTest.java
index 5a681ba..fe000e0 100644
--- a/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3PinotFSTest.java
+++ b/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3PinotFSTest.java
@@ -44,13 +44,14 @@ import software.amazon.awssdk.services.s3.model.S3Object;
 @Test
 @Listeners(com.adobe.testing.s3mock.testng.S3MockListener.class)
 public class S3PinotFSTest {
-  final String DELIMITER = "/";
-  S3PinotFS _s3PinotFS;
-  S3Client _s3Client;
-  final String BUCKET = "test-bucket";
-  final String SCHEME = "s3";
-  final String FILE_FORMAT = "%s://%s/%s";
-  final String DIR_FORMAT = "%s://%s";
+  private static final String DELIMITER = "/";
+  private static final String BUCKET = "test-bucket";
+  private static final String SCHEME = "s3";
+  private static final String FILE_FORMAT = "%s://%s/%s";
+  private static final String DIR_FORMAT = "%s://%s";
+
+  private S3PinotFS _s3PinotFS;
+  private S3Client _s3Client;
 
   @BeforeClass
   public void setUp() {
@@ -70,8 +71,7 @@ public class S3PinotFSTest {
 
   private void createEmptyFile(String folderName, String fileName) {
     String fileNameWithFolder = folderName + DELIMITER + fileName;
-    _s3Client
-        .putObject(S3TestUtils.getPutObjectRequest(BUCKET, fileNameWithFolder), RequestBody.fromBytes(new byte[0]));
+    _s3Client.putObject(S3TestUtils.getPutObjectRequest(BUCKET, fileNameWithFolder), RequestBody.fromBytes(new byte[0]));
   }
 
   @Test
@@ -83,11 +83,9 @@ public class S3PinotFSTest {
     for (String fileName : originalFiles) {
       _s3PinotFS.touch(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, fileName)));
     }
-    ListObjectsV2Response listObjectsV2Response =
-        _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, "", true));
+    ListObjectsV2Response listObjectsV2Response = _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, "", true));
 
-    String[] response = listObjectsV2Response.contents().stream().map(S3Object::key).filter(x -> x.contains("touch"))
-        .toArray(String[]::new);
+    String[] response = listObjectsV2Response.contents().stream().map(S3Object::key).filter(x -> x.contains("touch")).toArray(String[]::new);
 
     Assert.assertEquals(response.length, originalFiles.length);
     Assert.assertTrue(Arrays.equals(response, originalFiles));
@@ -104,11 +102,9 @@ public class S3PinotFSTest {
       String fileNameWithFolder = folder + DELIMITER + fileName;
       _s3PinotFS.touch(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, fileNameWithFolder)));
     }
-    ListObjectsV2Response listObjectsV2Response =
-        _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, folder, false));
+    ListObjectsV2Response listObjectsV2Response = _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, folder, false));
 
-    String[] response = listObjectsV2Response.contents().stream().map(S3Object::key).filter(x -> x.contains("touch"))
-        .toArray(String[]::new);
+    String[] response = listObjectsV2Response.contents().stream().map(S3Object::key).filter(x -> x.contains("touch")).toArray(String[]::new);
     Assert.assertEquals(response.length, originalFiles.length);
 
     Assert.assertTrue(Arrays.equals(response, Arrays.stream(originalFiles).map(x -> folder + DELIMITER + x).toArray()));
@@ -127,8 +123,7 @@ public class S3PinotFSTest {
 
     String[] actualFiles = _s3PinotFS.listFiles(URI.create(String.format(DIR_FORMAT, SCHEME, BUCKET)), false);
 
-    actualFiles =
-        Arrays.stream(actualFiles).filter(x -> x.contains("list")).toArray(String[]::new);
+    actualFiles = Arrays.stream(actualFiles).filter(x -> x.contains("list")).toArray(String[]::new);
     Assert.assertEquals(actualFiles.length, originalFiles.length);
 
     Assert.assertTrue(Arrays.equals(actualFiles, expectedFileNames.toArray()));
@@ -149,8 +144,9 @@ public class S3PinotFSTest {
     actualFiles = Arrays.stream(actualFiles).filter(x -> x.contains("list-2")).toArray(String[]::new);
     Assert.assertEquals(actualFiles.length, originalFiles.length);
 
-    Assert.assertTrue(
-        Arrays.equals(Arrays.stream(originalFiles).map(fileName -> String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + fileName)).toArray(), actualFiles));
+    Assert.assertTrue(Arrays
+        .equals(Arrays.stream(originalFiles).map(fileName -> String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + fileName)).toArray(),
+            actualFiles));
   }
 
   @Test
@@ -191,11 +187,8 @@ public class S3PinotFSTest {
 
     _s3PinotFS.delete(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, fileToDelete)), false);
 
-    ListObjectsV2Response listObjectsV2Response =
-        _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, "", true));
-    String[] actualResponse =
-        listObjectsV2Response.contents().stream().map(x -> x.key().substring(1)).filter(x -> x.contains("delete"))
-            .toArray(String[]::new);
+    ListObjectsV2Response listObjectsV2Response = _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, "", true));
+    String[] actualResponse = listObjectsV2Response.contents().stream().map(x -> x.key().substring(1)).filter(x -> x.contains("delete")).toArray(String[]::new);
 
     Assert.assertEquals(actualResponse.length, 2);
     Assert.assertTrue(Arrays.equals(actualResponse, expectedResultList.toArray()));
@@ -213,11 +206,8 @@ public class S3PinotFSTest {
 
     _s3PinotFS.delete(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folderName)), true);
 
-    ListObjectsV2Response listObjectsV2Response =
-        _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, "", true));
-    String[] actualResponse =
-        listObjectsV2Response.contents().stream().map(S3Object::key).filter(x -> x.contains("delete-2"))
-            .toArray(String[]::new);
+    ListObjectsV2Response listObjectsV2Response = _s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, "", true));
+    String[] actualResponse = listObjectsV2Response.contents().stream().map(S3Object::key).filter(x -> x.contains("delete-2")).toArray(String[]::new);
 
     Assert.assertEquals(0, actualResponse.length);
   }
@@ -235,10 +225,9 @@ public class S3PinotFSTest {
 
     boolean isBucketDir = _s3PinotFS.isDirectory(URI.create(String.format(DIR_FORMAT, SCHEME, BUCKET)));
     boolean isDir = _s3PinotFS.isDirectory(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder)));
-    boolean isDirChild = _s3PinotFS
-        .isDirectory(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder)));
-    boolean notIsDir = _s3PinotFS.isDirectory(URI.create(
-        String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder + DELIMITER + "a-delete.txt")));
+    boolean isDirChild = _s3PinotFS.isDirectory(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder)));
+    boolean notIsDir =
+        _s3PinotFS.isDirectory(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder + DELIMITER + "a-delete.txt")));
 
     Assert.assertTrue(isBucketDir);
     Assert.assertTrue(isDir);
@@ -260,12 +249,10 @@ public class S3PinotFSTest {
 
     boolean bucketExists = _s3PinotFS.exists(URI.create(String.format(DIR_FORMAT, SCHEME, BUCKET)));
     boolean dirExists = _s3PinotFS.exists(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder)));
-    boolean childDirExists =
-        _s3PinotFS.exists(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder)));
-    boolean fileExists = _s3PinotFS.exists(URI.create(
-        String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder + DELIMITER + "a-ex.txt")));
-    boolean fileNotExists = _s3PinotFS.exists(URI.create(
-        String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder + DELIMITER + "d-ex.txt")));
+    boolean childDirExists = _s3PinotFS.exists(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder)));
+    boolean fileExists = _s3PinotFS.exists(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder + DELIMITER + "a-ex.txt")));
+    boolean fileNotExists =
+        _s3PinotFS.exists(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder + DELIMITER + "d-ex.txt")));
 
     Assert.assertTrue(bucketExists);
     Assert.assertTrue(dirExists);
@@ -317,5 +304,4 @@ public class S3PinotFSTest {
     HeadObjectResponse headObjectResponse = _s3Client.headObject(S3TestUtils.getHeadObjectRequest(BUCKET, folderName));
     Assert.assertTrue(headObjectResponse.sdkHttpResponse().isSuccessful());
   }
-
 }
diff --git a/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3TestUtils.java b/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3TestUtils.java
index 5d1b725..e7938de 100644
--- a/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3TestUtils.java
+++ b/pinot-plugins/pinot-file-system/pinot-s3/src/test/java/org/apache/pinot/plugin/filesystem/S3TestUtils.java
@@ -24,6 +24,8 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest;
 
 
 public class S3TestUtils {
+  private S3TestUtils() {
+  }
 
   public static PutObjectRequest getPutObjectRequest(String bucket, String key) {
     return PutObjectRequest.builder().bucket(bucket).key(key).build();
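
The only change to S3TestUtils is the added private constructor, the usual way to satisfy a checkstyle utility-class rule: a class exposing only static helpers should not be instantiable. The same pattern on a purely hypothetical helper (not part of this commit):

    // Hypothetical utility class, shown only to illustrate the pattern.
    public final class PathUtils {
      private PathUtils() {
        // Hidden constructor: every member is static, so instances make no sense.
      }

      public static String stripTrailingSlash(String path) {
        return path.endsWith("/") ? path.substring(0, path.length() - 1) : path;
      }
    }
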
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/pom.xml b/pinot-plugins/pinot-input-format/pinot-avro-base/pom.xml
index acf4c50..5b97aa2 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroIngestionSchemaValidator.java b/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroIngestionSchemaValidator.java
index 2884989..15f72a6 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroIngestionSchemaValidator.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroIngestionSchemaValidator.java
@@ -93,8 +93,7 @@ public class AvroIngestionSchemaValidator implements IngestionSchemaValidator {
       org.apache.avro.Schema.Field avroColumnField = _avroSchema.getField(columnName);
       if (avroColumnField == null) {
         _missingPinotColumn.addMismatchReason(String
-            .format("The Pinot column: (%s: %s) is missing in the %s schema of input data.", columnName,
-                fieldSpec.getDataType().name(), getInputSchemaType()));
+            .format("The Pinot column: (%s: %s) is missing in the %s schema of input data.", columnName, fieldSpec.getDataType().name(), getInputSchemaType()));
         continue;
       }
       String avroColumnName = avroColumnField.schema().getName();
@@ -121,47 +120,44 @@ public class AvroIngestionSchemaValidator implements IngestionSchemaValidator {
         // check single-value multi-value mismatch
         if (avroColumnType.ordinal() < org.apache.avro.Schema.Type.STRING.ordinal()) {
           _singleValueMultiValueFieldMismatch.addMismatchReason(String
-              .format(
-                  "The Pinot column: %s is 'single-value' column but the column: %s from input %s is 'multi-value' column.",
-                  columnName, avroColumnName, getInputSchemaType()));
+              .format("The Pinot column: %s is 'single-value' column but the column: %s from input %s is 'multi-value' column.", columnName, avroColumnName,
+                  getInputSchemaType()));
         }
         FieldSpec.DataType dataTypeForSVColumn = AvroUtils.extractFieldDataType(avroColumnField);
         // check data type mismatch
         if (fieldSpec.getDataType() != dataTypeForSVColumn) {
           _dataTypeMismatch.addMismatchReason(String
-              .format("The Pinot column: (%s: %s) doesn't match with the column (%s: %s) in input %s schema.", columnName,
-                  fieldSpec.getDataType().name(), avroColumnName, avroColumnType.name(),
-                  getInputSchemaType()));
+              .format("The Pinot column: (%s: %s) doesn't match with the column (%s: %s) in input %s schema.", columnName, fieldSpec.getDataType().name(),
+                  avroColumnName, avroColumnType.name(), getInputSchemaType()));
         }
       } else {
         // check single-value multi-value mismatch
         if (avroColumnType.ordinal() >= org.apache.avro.Schema.Type.STRING.ordinal()) {
           _singleValueMultiValueFieldMismatch.addMismatchReason(String
-              .format(
-                  "The Pinot column: %s is 'multi-value' column but the column: %s from input %s schema is 'single-value' column.",
-                  columnName, avroColumnName, getInputSchemaType()));
+              .format("The Pinot column: %s is 'multi-value' column but the column: %s from input %s schema is 'single-value' column.", columnName,
+                  avroColumnName, getInputSchemaType()));
         }
         // check data type mismatch
         FieldSpec.DataType dataTypeForMVColumn = AvroUtils.extractFieldDataType(avroColumnField);
         if (fieldSpec.getDataType() != dataTypeForMVColumn) {
           _dataTypeMismatch.addMismatchReason(String
-              .format("The Pinot column: (%s: %s) doesn't match with the column (%s: %s) in input %s schema.",
-                  columnName, fieldSpec.getDataType().name(), avroColumnName, dataTypeForMVColumn.name(),
-                  getInputSchemaType()));
+              .format("The Pinot column: (%s: %s) doesn't match with the column (%s: %s) in input %s schema.", columnName, fieldSpec.getDataType().name(),
+                  avroColumnName, dataTypeForMVColumn.name(), getInputSchemaType()));
         }
         // check multi-value column structure mismatch
         if (avroColumnType != org.apache.avro.Schema.Type.ARRAY) {
           // multi-value column should use array structure for now.
           _multiValueStructureMismatch.addMismatchReason(String.format(
-              "The Pinot column: %s is 'multi-value' column but the column: %s from input %s schema is of '%s' type, which should have been of 'array' type.",
-              columnName, avroColumnName, getInputSchemaType(), avroColumnType.getName()));
+              "The Pinot column: %s is 'multi-value' column but the column: %s from input %s schema is of '%s' type, "
+                  + "which should have been of 'array' type.", columnName, avroColumnName, getInputSchemaType(), avroColumnType.getName()));
         } else {
           org.apache.avro.Schema.Type elementType = avroColumnSchema.getElementType().getType();
           if (elementType.ordinal() < org.apache.avro.Schema.Type.STRING.ordinal()) {
             // even though the column schema is of array type, the element type of that array could be of complex type like array, map, etc.
             _multiValueStructureMismatch.addMismatchReason(String.format(
-                "The Pinot column: %s is 'multi-value' column and it's of 'array' type in input %s schema, but the element type is of '%s' type, which should have been of 'primitive' type.",
-                columnName, getInputSchemaType(), avroColumnSchema.getElementType().getType()));
+                "The Pinot column: %s is 'multi-value' column and it's of 'array' type in input %s schema, but the "
+                    + "element type is of '%s' type, which should have been of 'primitive' type.", columnName, getInputSchemaType(),
+                avroColumnSchema.getElementType().getType()));
           }
         }
       }
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroSchemaUtil.java b/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroSchemaUtil.java
index 9386a91..36b1207 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroSchemaUtil.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroSchemaUtil.java
@@ -27,6 +27,9 @@ import org.apache.pinot.spi.utils.JsonUtils;
 
 
 public class AvroSchemaUtil {
+  private AvroSchemaUtil() {
+  }
+
   /**
    * Returns the data type stored in Pinot that is associated with the given Avro type.
    */
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroUtils.java b/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroUtils.java
index 16674ad..01d0b95 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroUtils.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/src/main/java/org/apache/pinot/plugin/inputformat/avro/AvroUtils.java
@@ -60,8 +60,8 @@ public class AvroUtils {
    * @param timeUnit Time unit
    * @return Pinot schema
    */
-  public static Schema getPinotSchemaFromAvroSchema(org.apache.avro.Schema avroSchema,
-      @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit) {
+  public static Schema getPinotSchemaFromAvroSchema(org.apache.avro.Schema avroSchema, @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap,
+      @Nullable TimeUnit timeUnit) {
     Schema pinotSchema = new Schema();
 
     for (Field field : avroSchema.getFields()) {
@@ -87,13 +87,13 @@ public class AvroUtils {
    * @return Pinot schema
    */
   public static Schema getPinotSchemaFromAvroSchemaWithComplexTypeHandling(org.apache.avro.Schema avroSchema,
-      @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit, List<String> fieldsToUnnest,
-      String delimiter, ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson) {
+      @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit, List<String> fieldsToUnnest, String delimiter,
+      ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson) {
     Schema pinotSchema = new Schema();
 
     for (Field field : avroSchema.getFields()) {
-      extractSchemaWithComplexTypeHandling(field.schema(), fieldsToUnnest, delimiter, field.name(), pinotSchema,
-          fieldTypeMap, timeUnit, collectionNotUnnestedToJson);
+      extractSchemaWithComplexTypeHandling(field.schema(), fieldsToUnnest, delimiter, field.name(), pinotSchema, fieldTypeMap, timeUnit,
+          collectionNotUnnestedToJson);
     }
     return pinotSchema;
   }
@@ -106,8 +106,7 @@ public class AvroUtils {
    * @param timeUnit Time unit
    * @return Pinot schema
    */
-  public static Schema getPinotSchemaFromAvroDataFile(File avroDataFile,
-      @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit)
+  public static Schema getPinotSchemaFromAvroDataFile(File avroDataFile, @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit)
       throws IOException {
     try (DataFileStream<GenericRecord> reader = getAvroReader(avroDataFile)) {
       org.apache.avro.Schema avroSchema = reader.getSchema();
@@ -139,16 +138,15 @@ public class AvroUtils {
    * @param collectionNotUnnestedToJson to mode of converting collection to JSON string
    * @return Pinot schema
    */
-  public static Schema getPinotSchemaFromAvroSchemaFile(File avroSchemaFile,
-      @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit, boolean complexType,
-      List<String> fieldsToUnnest, String delimiter, ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson)
+  public static Schema getPinotSchemaFromAvroSchemaFile(File avroSchemaFile, @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap,
+      @Nullable TimeUnit timeUnit, boolean complexType, List<String> fieldsToUnnest, String delimiter,
+      ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson)
       throws IOException {
     org.apache.avro.Schema avroSchema = new org.apache.avro.Schema.Parser().parse(avroSchemaFile);
     if (!complexType) {
       return getPinotSchemaFromAvroSchema(avroSchema, fieldTypeMap, timeUnit);
     } else {
-      return getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldTypeMap, timeUnit, fieldsToUnnest,
-          delimiter, collectionNotUnnestedToJson);
+      return getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldTypeMap, timeUnit, fieldsToUnnest, delimiter, collectionNotUnnestedToJson);
     }
   }
 
@@ -284,9 +282,8 @@ public class AvroUtils {
     }
   }
 
-  private static void extractSchemaWithComplexTypeHandling(org.apache.avro.Schema fieldSchema,
-      List<String> fieldsToUnnest, String delimiter, String path, Schema pinotSchema,
-      @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit,
+  private static void extractSchemaWithComplexTypeHandling(org.apache.avro.Schema fieldSchema, List<String> fieldsToUnnest, String delimiter, String path,
+      Schema pinotSchema, @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit,
       ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson) {
     org.apache.avro.Schema.Type fieldType = fieldSchema.getType();
     switch (fieldType) {
@@ -302,28 +299,25 @@ public class AvroUtils {
           }
         }
         if (nonNullSchema != null) {
-          extractSchemaWithComplexTypeHandling(nonNullSchema, fieldsToUnnest, delimiter, path, pinotSchema, fieldTypeMap,
-              timeUnit, collectionNotUnnestedToJson);
+          extractSchemaWithComplexTypeHandling(nonNullSchema, fieldsToUnnest, delimiter, path, pinotSchema, fieldTypeMap, timeUnit,
+              collectionNotUnnestedToJson);
         } else {
           throw new IllegalStateException("Cannot find non-null schema in UNION schema");
         }
         break;
       case RECORD:
         for (Field innerField : fieldSchema.getFields()) {
-          extractSchemaWithComplexTypeHandling(innerField.schema(), fieldsToUnnest, delimiter,
-              String.join(delimiter, path, innerField.name()), pinotSchema, fieldTypeMap, timeUnit,
-              collectionNotUnnestedToJson);
+          extractSchemaWithComplexTypeHandling(innerField.schema(), fieldsToUnnest, delimiter, String.join(delimiter, path, innerField.name()), pinotSchema,
+              fieldTypeMap, timeUnit, collectionNotUnnestedToJson);
         }
         break;
       case ARRAY:
         org.apache.avro.Schema elementType = fieldSchema.getElementType();
         if (fieldsToUnnest.contains(path)) {
-          extractSchemaWithComplexTypeHandling(elementType, fieldsToUnnest, delimiter, path, pinotSchema, fieldTypeMap,
-              timeUnit, collectionNotUnnestedToJson);
+          extractSchemaWithComplexTypeHandling(elementType, fieldsToUnnest, delimiter, path, pinotSchema, fieldTypeMap, timeUnit, collectionNotUnnestedToJson);
         } else if (collectionNotUnnestedToJson == ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE && AvroSchemaUtil
             .isPrimitiveType(elementType.getType())) {
-          addFieldToPinotSchema(pinotSchema, AvroSchemaUtil.valueOf(elementType.getType()), path, false, fieldTypeMap,
-              timeUnit);
+          addFieldToPinotSchema(pinotSchema, AvroSchemaUtil.valueOf(elementType.getType()), path, false, fieldTypeMap, timeUnit);
         } else if (shallConvertToJson(collectionNotUnnestedToJson, elementType)) {
           addFieldToPinotSchema(pinotSchema, DataType.STRING, path, true, fieldTypeMap, timeUnit);
         }
@@ -332,11 +326,11 @@ public class AvroUtils {
       default:
         DataType dataType = AvroSchemaUtil.valueOf(fieldType);
         addFieldToPinotSchema(pinotSchema, dataType, path, true, fieldTypeMap, timeUnit);
+        break;
     }
   }
 
-  private static boolean shallConvertToJson(ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson,
-      org.apache.avro.Schema elementType) {
+  private static boolean shallConvertToJson(ComplexTypeConfig.CollectionNotUnnestedToJson collectionNotUnnestedToJson, org.apache.avro.Schema elementType) {
     switch (collectionNotUnnestedToJson) {
       case ALL:
         return true;
@@ -349,14 +343,12 @@ public class AvroUtils {
     }
   }
 
-  private static void addFieldToPinotSchema(Schema pinotSchema, DataType dataType, String name,
-      boolean isSingleValueField, @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap,
-      @Nullable TimeUnit timeUnit) {
+  private static void addFieldToPinotSchema(Schema pinotSchema, DataType dataType, String name, boolean isSingleValueField,
+      @Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit) {
     if (fieldTypeMap == null) {
       pinotSchema.addField(new DimensionFieldSpec(name, dataType, isSingleValueField));
     } else {
-      FieldSpec.FieldType fieldType =
-          fieldTypeMap.containsKey(name) ? fieldTypeMap.get(name) : FieldSpec.FieldType.DIMENSION;
+      FieldSpec.FieldType fieldType = fieldTypeMap.containsKey(name) ? fieldTypeMap.get(name) : FieldSpec.FieldType.DIMENSION;
       Preconditions.checkNotNull(fieldType, "Field type not specified for field: %s", name);
       switch (fieldType) {
         case DIMENSION:
@@ -374,9 +366,9 @@ public class AvroUtils {
         case DATE_TIME:
           Preconditions.checkState(isSingleValueField, "Time field: %s cannot be multi-valued", name);
           Preconditions.checkNotNull(timeUnit, "Time unit cannot be null");
-          pinotSchema.addField(new DateTimeFieldSpec(name, dataType,
-              new DateTimeFormatSpec(1, timeUnit.toString(), DateTimeFieldSpec.TimeFormat.EPOCH.toString()).getFormat(),
-              new DateTimeGranularitySpec(1, timeUnit).getGranularity()));
+          pinotSchema.addField(
+              new DateTimeFieldSpec(name, dataType, new DateTimeFormatSpec(1, timeUnit.toString(), DateTimeFieldSpec.TimeFormat.EPOCH.toString()).getFormat(),
+                  new DateTimeGranularitySpec(1, timeUnit).getGranularity()));
           break;
         default:
           throw new UnsupportedOperationException("Unsupported field type: " + fieldType + " for field: " + name);
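
The reflowed AvroUtils methods above derive a Pinot schema from an Avro schema, either directly or with complex-type handling (unnesting selected record/array fields or collapsing them to JSON strings). A short usage sketch of the simple path follows; the Avro schema literal is illustrative, and with fieldTypeMap and timeUnit left null every extracted column becomes a dimension, as addFieldToPinotSchema shows above:

    import org.apache.avro.Schema.Parser;
    import org.apache.pinot.plugin.inputformat.avro.AvroUtils;
    import org.apache.pinot.spi.data.Schema;

    public final class AvroToPinotSchemaSketch {
      private AvroToPinotSchemaSketch() {
      }

      public static void main(String[] args) {
        // Illustrative Avro record with two primitive fields.
        org.apache.avro.Schema avroSchema = new Parser().parse(
            "{\"type\":\"record\",\"name\":\"clicks\",\"fields\":["
                + "{\"name\":\"campaign\",\"type\":\"string\"},"
                + "{\"name\":\"count\",\"type\":\"long\"}]}");

        // Null fieldTypeMap and timeUnit: both columns come back as dimension fields.
        Schema pinotSchema = AvroUtils.getPinotSchemaFromAvroSchema(avroSchema, null, null);
        System.out.println(pinotSchema);
      }
    }
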
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorComplexTypesTest.java b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorComplexTypesTest.java
index d7584ed..de99e93 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorComplexTypesTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorComplexTypesTest.java
@@ -45,51 +45,48 @@ import static org.apache.avro.Schema.*;
 public class AvroRecordExtractorComplexTypesTest extends AbstractRecordExtractorTest {
 
   private final File _dataFile = new File(_tempDir, "complex.avro");
-  Schema avroSchema;
-  Schema intStringMapAvroSchema;
-  Schema stringIntMapAvroSchema;
-  Schema simpleRecordSchema;
-  Schema complexRecordSchema;
-  Schema complexFieldSchema;
-  Schema complexListSchema;
+  private Schema _avroSchema;
+  private Schema _intStringMapAvroSchema;
+  private Schema _stringIntMapAvroSchema;
+  private Schema _simpleRecordSchema;
+  private Schema _complexRecordSchema;
+  private Schema _complexFieldSchema;
+  private Schema _complexListSchema;
 
   @Override
   protected List<Map<String, Object>> getInputRecords() {
 
     // map with int keys
-    intStringMapAvroSchema = createMap(create(Type.STRING));
+    _intStringMapAvroSchema = createMap(create(Type.STRING));
 
     // map with string keys
-    stringIntMapAvroSchema = createMap(create(Type.INT));
+    _stringIntMapAvroSchema = createMap(create(Type.INT));
 
     // simple record - contains a string, long and double array
-    simpleRecordSchema = createRecord("simpleRecord", null, null, false);
-    simpleRecordSchema.setFields(Lists.newArrayList(new Field("simpleField1", create(Type.STRING), null, null),
-        new Field("simpleField2", create(Type.LONG), null, null),
-        new Field("simpleList", createArray(create(Type.DOUBLE)), null, null)));
+    _simpleRecordSchema = createRecord("simpleRecord", null, null, false);
+    _simpleRecordSchema.setFields(Lists
+        .newArrayList(new Field("simpleField1", create(Type.STRING), null, null), new Field("simpleField2", create(Type.LONG), null, null),
+            new Field("simpleList", createArray(create(Type.DOUBLE)), null, null)));
 
     // complex record - contains a string, a complex field (contains int and long)
-    complexRecordSchema = createRecord("complexRecord", null, null, false);
-    complexFieldSchema = createRecord("complexField", null, null, false);
-    complexFieldSchema.setFields(Lists.newArrayList(new Field("field1", create(Type.INT), null, null),
-        new Field("field2", create(Type.LONG), null, null)));
-    complexRecordSchema.setFields(Lists.newArrayList(new Field("simpleField", create(Type.STRING), null, null),
-        new Field("complexField", complexFieldSchema, null, null)));
+    _complexRecordSchema = createRecord("complexRecord", null, null, false);
+    _complexFieldSchema = createRecord("complexField", null, null, false);
+    _complexFieldSchema.setFields(Lists.newArrayList(new Field("field1", create(Type.INT), null, null), new Field("field2", create(Type.LONG), null, null)));
+    _complexRecordSchema
+        .setFields(Lists.newArrayList(new Field("simpleField", create(Type.STRING), null, null), new Field("complexField", _complexFieldSchema, null, null)));
 
     // complex list element - each element contains a record of int and long
-    complexListSchema = createRecord("complexList", null, null, false);
-    complexListSchema.setFields(Lists.newArrayList(new Field("field1", create(Type.INT), null, null),
-        new Field("field2", create(Type.LONG), null, null)));
+    _complexListSchema = createRecord("complexList", null, null, false);
+    _complexListSchema.setFields(Lists.newArrayList(new Field("field1", create(Type.INT), null, null), new Field("field2", create(Type.LONG), null, null)));
 
-    Field map1Field = new Field("map1", intStringMapAvroSchema, null, null);
-    Field map2Field = new Field("map2", stringIntMapAvroSchema, null, null);
-    Field simpleRecordField = new Field("simpleRecord", simpleRecordSchema, null, null);
-    Field complexRecordField = new Field("complexRecord", complexRecordSchema, null, null);
-    Field complexListField = new Field("complexList", createArray(complexListSchema), null, null);
+    Field map1Field = new Field("map1", _intStringMapAvroSchema, null, null);
+    Field map2Field = new Field("map2", _stringIntMapAvroSchema, null, null);
+    Field simpleRecordField = new Field("simpleRecord", _simpleRecordSchema, null, null);
+    Field complexRecordField = new Field("complexRecord", _complexRecordSchema, null, null);
+    Field complexListField = new Field("complexList", createArray(_complexListSchema), null, null);
 
-    avroSchema = createRecord("manyComplexTypes", null, null, false);
-    avroSchema
-        .setFields(Lists.newArrayList(map1Field, map2Field, simpleRecordField, complexRecordField, complexListField));
+    _avroSchema = createRecord("manyComplexTypes", null, null, false);
+    _avroSchema.setFields(Lists.newArrayList(map1Field, map2Field, simpleRecordField, complexRecordField, complexListField));
 
     List<Map<String, Object>> inputRecords = new ArrayList<>(2);
     inputRecords.add(getRecord1());
@@ -110,24 +107,24 @@ public class AvroRecordExtractorComplexTypesTest extends AbstractRecordExtractor
     map2.put("k2", 20000);
     record1.put("map2", map2);
 
-    GenericRecord simpleRecord = new GenericData.Record(simpleRecordSchema);
+    GenericRecord simpleRecord = new GenericData.Record(_simpleRecordSchema);
     simpleRecord.put("simpleField1", "foo");
     simpleRecord.put("simpleField2", 1588469340000L);
     simpleRecord.put("simpleList", Arrays.asList(1.1, 2.2));
     record1.put("simpleRecord", simpleRecord);
 
-    GenericRecord complexRecord = new GenericData.Record(complexRecordSchema);
-    GenericRecord subComplexRecord = new GenericData.Record(complexFieldSchema);
+    GenericRecord complexRecord = new GenericData.Record(_complexRecordSchema);
+    GenericRecord subComplexRecord = new GenericData.Record(_complexFieldSchema);
     subComplexRecord.put("field1", 100);
     subComplexRecord.put("field2", 1588469340000L);
     complexRecord.put("simpleField", "foo");
     complexRecord.put("complexField", subComplexRecord);
     record1.put("complexRecord", complexRecord);
 
-    GenericRecord listElem1 = new GenericData.Record(complexListSchema);
+    GenericRecord listElem1 = new GenericData.Record(_complexListSchema);
     listElem1.put("field1", 20);
     listElem1.put("field2", 2000200020002000L);
-    GenericRecord listElem2 = new GenericData.Record(complexListSchema);
+    GenericRecord listElem2 = new GenericData.Record(_complexListSchema);
     listElem2.put("field1", 280);
     listElem2.put("field2", 8000200020002000L);
     record1.put("complexList", Arrays.asList(listElem1, listElem2));
@@ -147,24 +144,24 @@ public class AvroRecordExtractorComplexTypesTest extends AbstractRecordExtractor
     map2.put("k2", 200);
     record2.put("map2", map2);
 
-    GenericRecord simpleRecord2 = new GenericData.Record(simpleRecordSchema);
+    GenericRecord simpleRecord2 = new GenericData.Record(_simpleRecordSchema);
     simpleRecord2.put("simpleField1", "foo");
     simpleRecord2.put("simpleField2", 1588469340000L);
     simpleRecord2.put("simpleList", Arrays.asList(1.1, 2.2));
     record2.put("simpleRecord", simpleRecord2);
 
-    GenericRecord complexRecord2 = new GenericData.Record(complexRecordSchema);
-    GenericRecord subComplexRecord2 = new GenericData.Record(complexFieldSchema);
+    GenericRecord complexRecord2 = new GenericData.Record(_complexRecordSchema);
+    GenericRecord subComplexRecord2 = new GenericData.Record(_complexFieldSchema);
     subComplexRecord2.put("field1", 100);
     subComplexRecord2.put("field2", 1588469340000L);
     complexRecord2.put("simpleField", "foo");
     complexRecord2.put("complexField", subComplexRecord2);
     record2.put("complexRecord", complexRecord2);
 
-    GenericRecord listElem12 = new GenericData.Record(complexListSchema);
+    GenericRecord listElem12 = new GenericData.Record(_complexListSchema);
     listElem12.put("field1", 20);
     listElem12.put("field2", 2000200020002000L);
-    GenericRecord listElem22 = new GenericData.Record(complexListSchema);
+    GenericRecord listElem22 = new GenericData.Record(_complexListSchema);
     listElem22.put("field1", 280);
     listElem22.put("field2", 8000200020002000L);
     record2.put("complexList", Arrays.asList(listElem12, listElem22));
@@ -195,10 +192,10 @@ public class AvroRecordExtractorComplexTypesTest extends AbstractRecordExtractor
   protected void createInputFile()
       throws IOException {
 
-    try (DataFileWriter<GenericData.Record> fileWriter = new DataFileWriter<>(new GenericDatumWriter<>(avroSchema))) {
-      fileWriter.create(avroSchema, _dataFile);
+    try (DataFileWriter<GenericData.Record> fileWriter = new DataFileWriter<>(new GenericDatumWriter<>(_avroSchema))) {
+      fileWriter.create(_avroSchema, _dataFile);
       for (Map<String, Object> inputRecord : _inputRecords) {
-        GenericData.Record record = new GenericData.Record(avroSchema);
+        GenericData.Record record = new GenericData.Record(_avroSchema);
         for (String columnName : _sourceFieldNames) {
           record.put(columnName, inputRecord.get(columnName));
         }
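
    The hunks above apply one of the conventions enforced by the new checkstyle rules: test fixture fields become private and take the underscore prefix. A minimal sketch of the pattern, with made-up class and schema names rather than anything from the repository:

        import java.util.Collections;
        import org.apache.avro.Schema;

        public class ExampleSchemaHolder {
          // Instance fields are private and carry the underscore prefix, as in the renamed test fields above.
          private Schema _avroSchema;

          public void init() {
            // A record with no fields is enough to show the naming change (name and namespace are made up).
            _avroSchema = Schema.createRecord("example", null, "org.example", false);
            _avroSchema.setFields(Collections.emptyList());
          }

          public Schema getAvroSchema() {
            return _avroSchema;
          }
        }
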
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorTest.java b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorTest.java
index 0dc2467..cf8cc7b 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordExtractorTest.java
@@ -68,13 +68,12 @@ public class AvroRecordExtractorTest extends AbstractRecordExtractorTest {
       throws IOException {
 
     Schema avroSchema = createRecord("eventsRecord", null, null, false);
-    List<Field> fields = Arrays
-        .asList(new Field("user_id", createUnion(Lists.newArrayList(create(Type.INT), create(Type.NULL))), null, null),
-            new Field("firstName", createUnion(Lists.newArrayList(create(Type.STRING), create(Type.NULL))), null, null),
-            new Field("lastName", createUnion(Lists.newArrayList(create(Type.STRING), create(Type.NULL))), null, null),
-            new Field("bids", createUnion(Lists.newArrayList(createArray(create(Type.INT)), create(Type.NULL))), null,
-                null), new Field("campaignInfo", create(Type.STRING), null, null),
-            new Field("cost", create(Type.DOUBLE), null, null), new Field("timestamp", create(Type.LONG), null, null));
+    List<Field> fields = Arrays.asList(new Field("user_id", createUnion(Lists.newArrayList(create(Type.INT), create(Type.NULL))), null, null),
+        new Field("firstName", createUnion(Lists.newArrayList(create(Type.STRING), create(Type.NULL))), null, null),
+        new Field("lastName", createUnion(Lists.newArrayList(create(Type.STRING), create(Type.NULL))), null, null),
+        new Field("bids", createUnion(Lists.newArrayList(createArray(create(Type.INT)), create(Type.NULL))), null, null),
+        new Field("campaignInfo", create(Type.STRING), null, null), new Field("cost", create(Type.DOUBLE), null, null),
+        new Field("timestamp", create(Type.LONG), null, null));
 
     avroSchema.setFields(fields);
 
@@ -98,8 +97,8 @@ public class AvroRecordExtractorTest extends AbstractRecordExtractorTest {
     AvroRecordExtractor avroRecordExtractor = new AvroRecordExtractor();
     avroRecordExtractor.init(null, null);
 
-    org.apache.pinot.spi.data.Schema pinotSchema = new org.apache.pinot.spi.data.Schema.SchemaBuilder()
-        .addSingleValueDimension(testColumnName, FieldSpec.DataType.LONG).build();
+    org.apache.pinot.spi.data.Schema pinotSchema =
+        new org.apache.pinot.spi.data.Schema.SchemaBuilder().addSingleValueDimension(testColumnName, FieldSpec.DataType.LONG).build();
     Schema schema = AvroUtils.getAvroSchemaFromPinotSchema(pinotSchema);
     GenericRecord genericRecord = new GenericData.Record(schema);
     genericRecord.put(testColumnName, columnValue);
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordToPinotRowGeneratorTest.java b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordToPinotRowGeneratorTest.java
index 45cf943..b556d04 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordToPinotRowGeneratorTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroRecordToPinotRowGeneratorTest.java
@@ -35,8 +35,7 @@ public class AvroRecordToPinotRowGeneratorTest {
   @Test
   public void testIncomingTimeColumn()
       throws Exception {
-    List<Schema.Field> avroFields =
-        Collections.singletonList(new Schema.Field("incomingTime", Schema.create(Schema.Type.LONG), null, null));
+    List<Schema.Field> avroFields = Collections.singletonList(new Schema.Field("incomingTime", Schema.create(Schema.Type.LONG), null, null));
     Schema avroSchema = Schema.createRecord(avroFields);
     GenericData.Record avroRecord = new GenericData.Record(avroSchema);
     avroRecord.put("incomingTime", 12345L);
@@ -48,8 +47,7 @@ public class AvroRecordToPinotRowGeneratorTest {
     GenericRow genericRow = new GenericRow();
     avroRecordExtractor.extract(avroRecord, genericRow);
 
-    Assert.assertTrue(
-        genericRow.getFieldToValueMap().keySet().containsAll(Arrays.asList("incomingTime", "outgoingTime")));
+    Assert.assertTrue(genericRow.getFieldToValueMap().keySet().containsAll(Arrays.asList("incomingTime", "outgoingTime")));
     Assert.assertEquals(genericRow.getValue("incomingTime"), 12345L);
   }
 }
diff --git a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroUtilsTest.java b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroUtilsTest.java
index bbe7c59..f4494ee 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroUtilsTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro-base/src/test/java/org/apache/pinot/plugin/inputformat/avro/AvroUtilsTest.java
@@ -36,18 +36,16 @@ import org.testng.collections.Lists;
 
 public class AvroUtilsTest {
 
-  String AVRO_SCHEMA = "fake_avro_schema.avsc";
-  String AVRO_NESTED_SCHEMA = "fake_avro_nested_schema.avsc";
+  private static final String AVRO_SCHEMA = "fake_avro_schema.avsc";
+  private static final String AVRO_NESTED_SCHEMA = "fake_avro_nested_schema.avsc";
 
   @Test
   public void testGetPinotSchemaFromAvroSchemaNullFieldTypeMap()
       throws IOException {
-    org.apache.avro.Schema avroSchema =
-        new org.apache.avro.Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_SCHEMA));
+    org.apache.avro.Schema avroSchema = new org.apache.avro.Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_SCHEMA));
     Schema inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchema(avroSchema, null, null);
-    Schema expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING)
-        .addSingleValueDimension("d2", DataType.LONG).addSingleValueDimension("d3", DataType.STRING)
-        .addSingleValueDimension("m1", DataType.INT).addSingleValueDimension("m2", DataType.INT)
+    Schema expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addSingleValueDimension("d2", DataType.LONG)
+        .addSingleValueDimension("d3", DataType.STRING).addSingleValueDimension("m1", DataType.INT).addSingleValueDimension("m2", DataType.INT)
         .addSingleValueDimension("hoursSinceEpoch", DataType.LONG).build();
     Assert.assertEquals(expectedSchema, inferredPinotSchema);
   }
@@ -55,26 +53,22 @@ public class AvroUtilsTest {
   @Test
   public void testGetPinotSchemaFromAvroSchemaWithFieldTypeMap()
       throws IOException {
-    org.apache.avro.Schema avroSchema =
-        new org.apache.avro.Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_SCHEMA));
+    org.apache.avro.Schema avroSchema = new org.apache.avro.Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_SCHEMA));
     Map<String, FieldSpec.FieldType> fieldSpecMap =
-        new ImmutableMap.Builder<String, FieldSpec.FieldType>().put("d1", FieldType.DIMENSION)
-            .put("d2", FieldType.DIMENSION).put("d3", FieldType.DIMENSION).put("hoursSinceEpoch", FieldType.TIME)
-            .put("m1", FieldType.METRIC).put("m2", FieldType.METRIC).build();
+        new ImmutableMap.Builder<String, FieldSpec.FieldType>().put("d1", FieldType.DIMENSION).put("d2", FieldType.DIMENSION).put("d3", FieldType.DIMENSION)
+            .put("hoursSinceEpoch", FieldType.TIME).put("m1", FieldType.METRIC).put("m2", FieldType.METRIC).build();
     Schema inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchema(avroSchema, fieldSpecMap, TimeUnit.HOURS);
-    Schema expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING)
-        .addSingleValueDimension("d2", DataType.LONG).addSingleValueDimension("d3", DataType.STRING)
-        .addMetric("m1", DataType.INT).addMetric("m2", DataType.INT)
+    Schema expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addSingleValueDimension("d2", DataType.LONG)
+        .addSingleValueDimension("d3", DataType.STRING).addMetric("m1", DataType.INT).addMetric("m2", DataType.INT)
         .addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
     Assert.assertEquals(expectedSchema, inferredPinotSchema);
 
-    fieldSpecMap = new ImmutableMap.Builder<String, FieldSpec.FieldType>().put("d1", FieldType.DIMENSION)
-        .put("d2", FieldType.DIMENSION).put("d3", FieldType.DIMENSION).put("hoursSinceEpoch", FieldType.DATE_TIME)
-        .put("m1", FieldType.METRIC).put("m2", FieldType.METRIC).build();
+    fieldSpecMap =
+        new ImmutableMap.Builder<String, FieldSpec.FieldType>().put("d1", FieldType.DIMENSION).put("d2", FieldType.DIMENSION).put("d3", FieldType.DIMENSION)
+            .put("hoursSinceEpoch", FieldType.DATE_TIME).put("m1", FieldType.METRIC).put("m2", FieldType.METRIC).build();
     inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchema(avroSchema, fieldSpecMap, TimeUnit.HOURS);
-    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING)
-        .addSingleValueDimension("d2", DataType.LONG).addSingleValueDimension("d3", DataType.STRING)
-        .addMetric("m1", DataType.INT).addMetric("m2", DataType.INT)
+    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addSingleValueDimension("d2", DataType.LONG)
+        .addSingleValueDimension("d3", DataType.STRING).addMetric("m1", DataType.INT).addMetric("m2", DataType.INT)
         .addDateTime("hoursSinceEpoch", DataType.LONG, "1:HOURS:EPOCH", "1:HOURS").build();
     Assert.assertEquals(expectedSchema, inferredPinotSchema);
   }
@@ -83,57 +77,45 @@ public class AvroUtilsTest {
   public void testGetPinotSchemaFromAvroSchemaWithComplexType()
       throws IOException {
     // do not unnest collect
-    org.apache.avro.Schema avroSchema =
-        new org.apache.avro.Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_NESTED_SCHEMA));
+    org.apache.avro.Schema avroSchema = new org.apache.avro.Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_NESTED_SCHEMA));
     Map<String, FieldSpec.FieldType> fieldSpecMap =
-        new ImmutableMap.Builder<String, FieldSpec.FieldType>().put("d1", FieldType.DIMENSION)
-            .put("hoursSinceEpoch", FieldType.TIME).put("m1", FieldType.METRIC).build();
-    Schema inferredPinotSchema = AvroUtils
-        .getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS,
-            new ArrayList<>(), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
-    Schema expectedSchema =
-        new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
-            .addSingleValueDimension("tuple.streetaddress", DataType.STRING)
-            .addSingleValueDimension("tuple.city", DataType.STRING).addSingleValueDimension("entries", DataType.STRING)
-            .addMultiValueDimension("d2", DataType.INT)
-            .addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
+        new ImmutableMap.Builder<String, FieldSpec.FieldType>().put("d1", FieldType.DIMENSION).put("hoursSinceEpoch", FieldType.TIME)
+            .put("m1", FieldType.METRIC).build();
+    Schema inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, new ArrayList<>(), ".",
+        ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
+    Schema expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
+        .addSingleValueDimension("tuple.streetaddress", DataType.STRING).addSingleValueDimension("tuple.city", DataType.STRING)
+        .addSingleValueDimension("entries", DataType.STRING).addMultiValueDimension("d2", DataType.INT)
+        .addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
     Assert.assertEquals(expectedSchema, inferredPinotSchema);
 
     // unnest collection entries
     inferredPinotSchema = AvroUtils
-        .getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS,
-            Lists.newArrayList("entries"), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
-    expectedSchema =
-        new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
-            .addSingleValueDimension("tuple.streetaddress", DataType.STRING)
-            .addSingleValueDimension("tuple.city", DataType.STRING).addSingleValueDimension("entries.id", DataType.LONG)
-            .addSingleValueDimension("entries.description", DataType.STRING).addMultiValueDimension("d2", DataType.INT)
-            .addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
+        .getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, Lists.newArrayList("entries"), ".",
+            ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
+    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
+        .addSingleValueDimension("tuple.streetaddress", DataType.STRING).addSingleValueDimension("tuple.city", DataType.STRING)
+        .addSingleValueDimension("entries.id", DataType.LONG).addSingleValueDimension("entries.description", DataType.STRING)
+        .addMultiValueDimension("d2", DataType.INT).addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
     Assert.assertEquals(expectedSchema, inferredPinotSchema);
 
     // change delimiter
-    inferredPinotSchema = AvroUtils
-        .getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS,
-            Lists.newArrayList(), "_", ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
-    expectedSchema =
-        new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
-            .addSingleValueDimension("tuple_streetaddress", DataType.STRING)
-            .addSingleValueDimension("tuple_city", DataType.STRING).addSingleValueDimension("entries", DataType.STRING)
-            .addMultiValueDimension("d2", DataType.INT)
-            .addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
+    inferredPinotSchema = AvroUtils.getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, Lists.newArrayList(), "_",
+        ComplexTypeConfig.CollectionNotUnnestedToJson.NON_PRIMITIVE);
+    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
+        .addSingleValueDimension("tuple_streetaddress", DataType.STRING).addSingleValueDimension("tuple_city", DataType.STRING)
+        .addSingleValueDimension("entries", DataType.STRING).addMultiValueDimension("d2", DataType.INT)
+        .addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
     Assert.assertEquals(expectedSchema, inferredPinotSchema);
 
     // change the handling of collection-to-json option, d2 will become string
     inferredPinotSchema = AvroUtils
-        .getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS,
-            Lists.newArrayList("entries"), ".", ComplexTypeConfig.CollectionNotUnnestedToJson.ALL);
-    expectedSchema =
-        new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
-            .addSingleValueDimension("tuple.streetaddress", DataType.STRING)
-            .addSingleValueDimension("tuple.city", DataType.STRING).addSingleValueDimension("entries.id", DataType.LONG)
-            .addSingleValueDimension("entries.description", DataType.STRING)
-            .addSingleValueDimension("d2", DataType.STRING)
-            .addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
+        .getPinotSchemaFromAvroSchemaWithComplexTypeHandling(avroSchema, fieldSpecMap, TimeUnit.HOURS, Lists.newArrayList("entries"), ".",
+            ComplexTypeConfig.CollectionNotUnnestedToJson.ALL);
+    expectedSchema = new Schema.SchemaBuilder().addSingleValueDimension("d1", DataType.STRING).addMetric("m1", DataType.INT)
+        .addSingleValueDimension("tuple.streetaddress", DataType.STRING).addSingleValueDimension("tuple.city", DataType.STRING)
+        .addSingleValueDimension("entries.id", DataType.LONG).addSingleValueDimension("entries.description", DataType.STRING)
+        .addSingleValueDimension("d2", DataType.STRING).addTime(new TimeGranularitySpec(DataType.LONG, TimeUnit.HOURS, "hoursSinceEpoch"), null).build();
     Assert.assertEquals(expectedSchema, inferredPinotSchema);
   }
 }
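
    AvroUtilsTest gets the companion convention: bare test constants become private static final, and the long SchemaBuilder chains are re-wrapped to the project's line-length limit. A hedged illustration of the constant style, using a hypothetical resource name:

        import java.io.IOException;
        import java.io.InputStream;
        import org.apache.avro.Schema;

        public class ExampleAvroSchemaLoader {
          // Constants become private static final, matching the AVRO_SCHEMA / AVRO_NESTED_SCHEMA change above.
          private static final String EXAMPLE_AVRO_SCHEMA = "example_schema.avsc"; // hypothetical classpath resource

          public static Schema load()
              throws IOException {
            try (InputStream in = ClassLoader.getSystemResourceAsStream(EXAMPLE_AVRO_SCHEMA)) {
              // Parser.parse(InputStream) throws IOException; the caller decides how to handle a missing resource.
              return new Schema.Parser().parse(in);
            }
          }
        }
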
diff --git a/pinot-plugins/pinot-input-format/pinot-avro/pom.xml b/pinot-plugins/pinot-input-format/pinot-avro/pom.xml
index bec058e..f1b7ceb 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-avro/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/KafkaAvroMessageDecoder.java b/pinot-plugins/pinot-input-format/pinot-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/KafkaAvroMessageDecoder.java
index 0745b0f..56f8c0c 100644
--- a/pinot-plugins/pinot-input-format/pinot-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/KafkaAvroMessageDecoder.java
+++ b/pinot-plugins/pinot-input-format/pinot-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/KafkaAvroMessageDecoder.java
@@ -90,8 +90,7 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
     _schemaRegistryUrls = parseSchemaRegistryUrls(props.get(SCHEMA_REGISTRY_REST_URL));
 
     String avroSchemaName = topicName;
-    if (props.containsKey(SCHEMA_REGISTRY_SCHEMA_NAME) && props.get(SCHEMA_REGISTRY_SCHEMA_NAME) != null && !props
-        .get(SCHEMA_REGISTRY_SCHEMA_NAME).isEmpty()) {
+    if (props.containsKey(SCHEMA_REGISTRY_SCHEMA_NAME) && props.get(SCHEMA_REGISTRY_SCHEMA_NAME) != null && !props.get(SCHEMA_REGISTRY_SCHEMA_NAME).isEmpty()) {
       avroSchemaName = props.get(SCHEMA_REGISTRY_SCHEMA_NAME);
     }
     // With the logic below, we may not set defaultAvroSchema to be the latest one everytime.
@@ -114,9 +113,9 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
     if (recordExtractorClass == null) {
       recordExtractorClass = AvroRecordExtractor.class.getName();
     }
-    this._avroRecordExtractor = PluginManager.get().createInstance(recordExtractorClass);
+    _avroRecordExtractor = PluginManager.get().createInstance(recordExtractorClass);
     _avroRecordExtractor.init(fieldsToRead, null);
-    this._decoderFactory = new DecoderFactory();
+    _decoderFactory = new DecoderFactory();
     _md5ToAvroSchemaMap = new MD5AvroSchemaMap();
   }
 
@@ -152,8 +151,7 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
             _md5ToAvroSchemaMap.addSchema(_reusableMD5Bytes, schema);
           } catch (Exception e) {
             schema = _defaultAvroSchema;
-            LOGGER
-                .error("Error fetching schema using url {}. Attempting to continue with previous schema", schemaUri, e);
+            LOGGER.error("Error fetching schema using url {}. Attempting to continue with previous schema", schemaUri, e);
             schemaUpdateFailed = true;
           }
         } else {
@@ -164,12 +162,10 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
     }
     DatumReader<Record> reader = new GenericDatumReader<Record>(schema);
     try {
-      GenericData.Record avroRecord = reader.read(null,
-          _decoderFactory.createBinaryDecoder(payload, HEADER_LENGTH + offset, length - HEADER_LENGTH, null));
+      GenericData.Record avroRecord = reader.read(null, _decoderFactory.createBinaryDecoder(payload, HEADER_LENGTH + offset, length - HEADER_LENGTH, null));
       return _avroRecordExtractor.extract(avroRecord, destination);
     } catch (IOException e) {
-      LOGGER.error("Caught exception while reading message using schema {}{}",
-          (schema == null ? "null" : schema.getName()),
+      LOGGER.error("Caught exception while reading message using schema {}{}", (schema == null ? "null" : schema.getName()),
           (schemaUpdateFailed ? "(possibly due to schema update failure)" : ""), e);
       return null;
     }
@@ -189,25 +185,24 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
 
   private static class SchemaFetcher implements Callable<Boolean> {
     private org.apache.avro.Schema _schema;
-    private URL url;
+    private URL _url;
     private boolean _isSuccessful = false;
 
     SchemaFetcher(URL url) {
-      this.url = url;
+      _url = url;
     }
 
     @Override
     public Boolean call()
         throws Exception {
       try {
-        URLConnection conn = url.openConnection();
+        URLConnection conn = _url.openConnection();
         conn.setConnectTimeout(15000);
         conn.setReadTimeout(15000);
-        LOGGER.info("Fetching schema using url {}", url.toString());
+        LOGGER.info("Fetching schema using url {}", _url.toString());
 
         StringBuilder queryResp = new StringBuilder();
-        try (BufferedReader reader = new BufferedReader(
-            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
+        try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
           for (String line = reader.readLine(); line != null; line = reader.readLine()) {
             queryResp.append(line);
           }
@@ -215,7 +210,7 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
 
         _schema = org.apache.avro.Schema.parse(queryResp.toString());
 
-        LOGGER.info("Schema fetch succeeded on url {}", url.toString());
+        LOGGER.info("Schema fetch succeeded on url {}", _url.toString());
         return Boolean.TRUE;
       } catch (Exception e) {
         LOGGER.warn("Caught exception while fetching schema", e);
@@ -232,8 +227,8 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
       throws Exception {
     SchemaFetcher schemaFetcher = new SchemaFetcher(makeRandomUrl(reference));
     RetryPolicies
-        .exponentialBackoffRetryPolicy(MAXIMUM_SCHEMA_FETCH_RETRY_COUNT, MINIMUM_SCHEMA_FETCH_RETRY_TIME_MILLIS,
-            SCHEMA_FETCH_RETRY_EXPONENTIAL_BACKOFF_FACTOR).attempt(schemaFetcher);
+        .exponentialBackoffRetryPolicy(MAXIMUM_SCHEMA_FETCH_RETRY_COUNT, MINIMUM_SCHEMA_FETCH_RETRY_TIME_MILLIS, SCHEMA_FETCH_RETRY_EXPONENTIAL_BACKOFF_FACTOR)
+        .attempt(schemaFetcher);
     return schemaFetcher.getSchema();
   }
 
@@ -245,15 +240,15 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
    * </ul>
    */
   private static class MD5AvroSchemaMap {
-    private List<byte[]> md5s;
-    private List<org.apache.avro.Schema> schemas;
+    private List<byte[]> _md5s;
+    private List<org.apache.avro.Schema> _schemas;
 
     /**
      * Constructor for the class.
      */
     private MD5AvroSchemaMap() {
-      md5s = new ArrayList<>();
-      schemas = new ArrayList<>();
+      _md5s = new ArrayList<>();
+      _schemas = new ArrayList<>();
     }
 
     /**
@@ -263,9 +258,9 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
      * @return Avro schema for the given MD5.
      */
     private org.apache.avro.Schema getSchema(byte[] md5ForSchema) {
-      for (int i = 0; i < md5s.size(); i++) {
-        if (Arrays.equals(md5s.get(i), md5ForSchema)) {
-          return schemas.get(i);
+      for (int i = 0; i < _md5s.size(); i++) {
+        if (Arrays.equals(_md5s.get(i), md5ForSchema)) {
+          return _schemas.get(i);
         }
       }
       return null;
@@ -279,8 +274,8 @@ public class KafkaAvroMessageDecoder implements StreamMessageDecoder<byte[]> {
      * @param schema Avro Schema
      */
     private void addSchema(byte[] md5, org.apache.avro.Schema schema) {
-      md5s.add(Arrays.copyOf(md5, md5.length));
-      schemas.add(schema);
+      _md5s.add(Arrays.copyOf(md5, md5.length));
+      _schemas.add(schema);
     }
   }
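
    KafkaAvroMessageDecoder additionally drops the redundant this. qualifier once the fields carry the underscore prefix. A small stand-alone example of the same assignment style, assuming nothing beyond standard Java:

        import java.net.URL;

        class ExampleSchemaFetcher {
          private final URL _url;

          ExampleSchemaFetcher(URL url) {
            // With the underscore prefix there is no clash with the parameter, so "this." is redundant.
            _url = url;
          }

          URL getUrl() {
            return _url;
          }
        }
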
 
diff --git a/pinot-plugins/pinot-input-format/pinot-confluent-avro/pom.xml b/pinot-plugins/pinot-input-format/pinot-confluent-avro/pom.xml
index ea20981..02b479d 100644
--- a/pinot-plugins/pinot-input-format/pinot-confluent-avro/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-confluent-avro/pom.xml
@@ -37,9 +37,6 @@
     <kafka.lib.version>2.0.0</kafka.lib.version>
     <confluent.version>5.3.1</confluent.version>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <repositories>
     <repository>
diff --git a/pinot-plugins/pinot-input-format/pinot-confluent-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/confluent/KafkaConfluentSchemaRegistryAvroMessageDecoder.java b/pinot-plugins/pinot-input-format/pinot-confluent-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/confluent/KafkaConfluentSchemaRegistryAvroMessageDecoder.java
index 3dc737e..a581e20 100644
--- a/pinot-plugins/pinot-input-format/pinot-confluent-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/confluent/KafkaConfluentSchemaRegistryAvroMessageDecoder.java
+++ b/pinot-plugins/pinot-input-format/pinot-confluent-avro/src/main/java/org/apache/pinot/plugin/inputformat/avro/confluent/KafkaConfluentSchemaRegistryAvroMessageDecoder.java
@@ -57,7 +57,6 @@ public class KafkaConfluentSchemaRegistryAvroMessageDecoder implements StreamMes
   public RestService createRestService(String schemaRegistryUrl, Map<String, String> configs) {
     RestService restService = new RestService(schemaRegistryUrl);
 
-
     ConfigDef configDef = new ConfigDef();
     SslConfigs.addClientSslSupport(configDef);
     Map<String, ConfigDef.ConfigKey> configKeyMap = configDef.configKeys();
@@ -65,19 +64,18 @@ public class KafkaConfluentSchemaRegistryAvroMessageDecoder implements StreamMes
     for (String key : configs.keySet()) {
       if (!key.equals(SCHEMA_REGISTRY_REST_URL) && key.startsWith(SCHEMA_REGISTRY_OPTS_PREFIX)) {
         String value = configs.get(key);
-        key = key.substring(SCHEMA_REGISTRY_OPTS_PREFIX.length());
+        String schemaRegistryOptKey = key.substring(SCHEMA_REGISTRY_OPTS_PREFIX.length());
 
-        if (configKeyMap.containsKey(key)) {
-          if (configKeyMap.get(key).type == ConfigDef.Type.PASSWORD) {
-            sslConfigs.put(key, new Password(value));
+        if (configKeyMap.containsKey(schemaRegistryOptKey)) {
+          if (configKeyMap.get(schemaRegistryOptKey).type == ConfigDef.Type.PASSWORD) {
+            sslConfigs.put(schemaRegistryOptKey, new Password(value));
           } else {
-            sslConfigs.put(key, value);
+            sslConfigs.put(schemaRegistryOptKey, value);
           }
         }
       }
     }
 
-
     if (!sslConfigs.isEmpty()) {
       SslFactory sslFactory = new SslFactory(Mode.CLIENT);
       sslFactory.configure(sslConfigs);
@@ -91,10 +89,7 @@ public class KafkaConfluentSchemaRegistryAvroMessageDecoder implements StreamMes
       throws Exception {
     checkState(props.containsKey(SCHEMA_REGISTRY_REST_URL), "Missing required property '%s'", SCHEMA_REGISTRY_REST_URL);
     String schemaRegistryUrl = props.get(SCHEMA_REGISTRY_REST_URL);
-    SchemaRegistryClient schemaRegistryClient =
-            new CachedSchemaRegistryClient(
-                    createRestService(schemaRegistryUrl, props),
-                    1000, props);
+    SchemaRegistryClient schemaRegistryClient = new CachedSchemaRegistryClient(createRestService(schemaRegistryUrl, props), 1000, props);
 
     _deserializer = new KafkaAvroDeserializer(schemaRegistryClient);
     Preconditions.checkNotNull(topicName, "Topic must be provided");
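
    The loop change above stops reassigning the variable key; the stripped name is bound to a new local instead. A rough sketch of that pattern over a plain map, with an illustrative prefix:

        import java.util.HashMap;
        import java.util.Map;

        public class ExamplePrefixStripper {
          private static final String OPTS_PREFIX = "ssl."; // illustrative prefix, not the real constant

          public static Map<String, String> stripPrefix(Map<String, String> configs) {
            Map<String, String> stripped = new HashMap<>();
            for (String key : configs.keySet()) {
              if (key.startsWith(OPTS_PREFIX)) {
                // Keep the loop variable untouched; bind the shortened name to a new local instead of reassigning key.
                String strippedKey = key.substring(OPTS_PREFIX.length());
                stripped.put(strippedKey, configs.get(key));
              }
            }
            return stripped;
          }
        }
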
diff --git a/pinot-plugins/pinot-input-format/pinot-csv/pom.xml b/pinot-plugins/pinot-input-format/pinot-csv/pom.xml
index 99f152a..cd43d0c 100644
--- a/pinot-plugins/pinot-input-format/pinot-csv/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-csv/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-csv/src/main/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorConfig.java b/pinot-plugins/pinot-input-format/pinot-csv/src/main/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorConfig.java
index 803d6b7..9aa55b5 100644
--- a/pinot-plugins/pinot-input-format/pinot-csv/src/main/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorConfig.java
+++ b/pinot-plugins/pinot-input-format/pinot-csv/src/main/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorConfig.java
@@ -50,8 +50,7 @@ public class CSVRecordExtractorConfig implements RecordExtractorConfig {
   public Set<String> getColumnNames() {
     if (_columnNames == null) {
       throw new IllegalStateException(
-          "CSV column names must be set in " + this.getClass().getName()
-              + " if the fields to extract are not explicitly provided.");
+          "CSV column names must be set in " + this.getClass().getName() + " if the fields to extract are not explicitly provided.");
     }
     return _columnNames;
   }
diff --git a/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorTest.java b/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorTest.java
index 93994c9..169ba92 100644
--- a/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordExtractorTest.java
@@ -64,8 +64,7 @@ public class CSVRecordExtractorTest extends AbstractRecordExtractorTest {
   public void createInputFile()
       throws IOException {
     String[] header = _sourceFieldNames.toArray(new String[0]);
-    try (FileWriter fileWriter = new FileWriter(_dataFile); CSVPrinter csvPrinter = new CSVPrinter(fileWriter,
-        CSVFormat.DEFAULT.withHeader(header))) {
+    try (FileWriter fileWriter = new FileWriter(_dataFile); CSVPrinter csvPrinter = new CSVPrinter(fileWriter, CSVFormat.DEFAULT.withHeader(header))) {
 
       for (Map<String, Object> inputRecord : _inputRecords) {
         Object[] record = new Object[header.length];
@@ -110,7 +109,7 @@ public class CSVRecordExtractorTest extends AbstractRecordExtractorTest {
    */
   @Test
   public void testEscapeCharacterInCSV()
-    throws Exception {
+      throws Exception {
     // Create CSV config with backslash as escape character.
     CSVRecordReaderConfig csvRecordReaderConfig = new CSVRecordReaderConfig();
     csvRecordReaderConfig.setEscapeCharacter('\\');
diff --git a/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordReaderTest.java b/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordReaderTest.java
index be8afc3..9cd98d3 100644
--- a/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordReaderTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-csv/src/test/java/org/apache/pinot/plugin/inputformat/csv/CSVRecordReaderTest.java
@@ -35,7 +35,7 @@ import org.testng.Assert;
 
 
 public class CSVRecordReaderTest extends AbstractRecordReaderTest {
-  private static char CSV_MULTI_VALUE_DELIMITER = '\t';
+  private static final char CSV_MULTI_VALUE_DELIMITER = '\t';
   private final File _dataFile = new File(_tempDir, "data.csv");
 
   @Override
@@ -54,8 +54,7 @@ public class CSVRecordReaderTest extends AbstractRecordReaderTest {
 
     Schema pinotSchema = getPinotSchema();
     String[] columns = pinotSchema.getColumnNames().toArray(new String[0]);
-    try (FileWriter fileWriter = new FileWriter(_dataFile);
-        CSVPrinter csvPrinter = new CSVPrinter(fileWriter, CSVFormat.DEFAULT.withHeader(columns))) {
+    try (FileWriter fileWriter = new FileWriter(_dataFile); CSVPrinter csvPrinter = new CSVPrinter(fileWriter, CSVFormat.DEFAULT.withHeader(columns))) {
 
       for (Map<String, Object> r : recordsToWrite) {
         Object[] record = new Object[columns.length];
@@ -72,8 +71,7 @@ public class CSVRecordReaderTest extends AbstractRecordReaderTest {
   }
 
   @Override
-  protected void checkValue(RecordReader recordReader, List<Map<String, Object>> expectedRecordsMap,
-      List<Object[]> expectedPrimaryKeys)
+  protected void checkValue(RecordReader recordReader, List<Map<String, Object>> expectedRecordsMap, List<Object[]> expectedPrimaryKeys)
       throws Exception {
     for (int i = 0; i < expectedRecordsMap.size(); i++) {
       Map<String, Object> expectedRecord = expectedRecordsMap.get(i);
@@ -81,8 +79,7 @@ public class CSVRecordReaderTest extends AbstractRecordReaderTest {
       for (FieldSpec fieldSpec : _pinotSchema.getAllFieldSpecs()) {
         String fieldSpecName = fieldSpec.getName();
         if (fieldSpec.isSingleValueField()) {
-          Assert.assertEquals(actualRecord.getValue(fieldSpecName).toString(),
-              expectedRecord.get(fieldSpecName).toString());
+          Assert.assertEquals(actualRecord.getValue(fieldSpecName).toString(), expectedRecord.get(fieldSpecName).toString());
         } else {
           List expectedRecords = (List) expectedRecord.get(fieldSpecName);
           if (expectedRecords.size() == 1) {
diff --git a/pinot-plugins/pinot-input-format/pinot-json/pom.xml b/pinot-plugins/pinot-input-format/pinot-json/pom.xml
index 0ce997b..f97778c 100644
--- a/pinot-plugins/pinot-input-format/pinot-json/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-json/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-json/src/main/java/org/apache/pinot/plugin/inputformat/json/JSONMessageDecoder.java b/pinot-plugins/pinot-input-format/pinot-json/src/main/java/org/apache/pinot/plugin/inputformat/json/JSONMessageDecoder.java
index 822ac20..9f87a0b 100644
--- a/pinot-plugins/pinot-input-format/pinot-json/src/main/java/org/apache/pinot/plugin/inputformat/json/JSONMessageDecoder.java
+++ b/pinot-plugins/pinot-input-format/pinot-json/src/main/java/org/apache/pinot/plugin/inputformat/json/JSONMessageDecoder.java
@@ -39,8 +39,7 @@ import org.slf4j.LoggerFactory;
 public class JSONMessageDecoder implements StreamMessageDecoder<byte[]> {
   private static final Logger LOGGER = LoggerFactory.getLogger(JSONMessageDecoder.class);
   private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-  private static final String JSON_RECORD_EXTRACTOR_CLASS =
-      "org.apache.pinot.plugin.inputformat.json.JSONRecordExtractor";
+  private static final String JSON_RECORD_EXTRACTOR_CLASS = "org.apache.pinot.plugin.inputformat.json.JSONRecordExtractor";
 
   private RecordExtractor<Map<String, Object>> _jsonRecordExtractor;
 
diff --git a/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordExtractorTest.java b/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordExtractorTest.java
index 48df469..7524264 100644
--- a/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordExtractorTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordExtractorTest.java
@@ -60,9 +60,8 @@ public class JSONRecordExtractorTest extends AbstractRecordExtractorTest {
 
   @Override
   protected Set<String> getSourceFields() {
-    return Sets.newHashSet(NULL_FIELD, INT_FIELD, LONG_FIELD, DOUBLE_FIELD, STRING_FIELD, INT_ARRAY_FIELD,
-        DOUBLE_ARRAY_FIELD, STRING_ARRAY_FIELD, COMPLEX_ARRAY_1_FIELD, COMPLEX_ARRAY_2_FIELD, MAP_1_FIELD,
-        MAP_2_FIELD);
+    return Sets.newHashSet(NULL_FIELD, INT_FIELD, LONG_FIELD, DOUBLE_FIELD, STRING_FIELD, INT_ARRAY_FIELD, DOUBLE_ARRAY_FIELD, STRING_ARRAY_FIELD,
+        COMPLEX_ARRAY_1_FIELD, COMPLEX_ARRAY_2_FIELD, MAP_1_FIELD, MAP_2_FIELD);
   }
 
   /**
diff --git a/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordReaderTest.java b/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordReaderTest.java
index 81ab499..ef99abf 100644
--- a/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordReaderTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-json/src/test/java/org/apache/pinot/plugin/inputformat/json/JSONRecordReaderTest.java
@@ -58,8 +58,7 @@ public class JSONRecordReaderTest extends AbstractRecordReaderTest {
   }
 
   @Override
-  protected void checkValue(RecordReader recordReader, List<Map<String, Object>> expectedRecordsMap,
-      List<Object[]> expectedPrimaryKeys)
+  protected void checkValue(RecordReader recordReader, List<Map<String, Object>> expectedRecordsMap, List<Object[]> expectedPrimaryKeys)
       throws Exception {
     for (int i = 0; i < expectedRecordsMap.size(); i++) {
       Map<String, Object> expectedRecord = expectedRecordsMap.get(i);
@@ -67,8 +66,7 @@ public class JSONRecordReaderTest extends AbstractRecordReaderTest {
       for (FieldSpec fieldSpec : _pinotSchema.getAllFieldSpecs()) {
         String fieldSpecName = fieldSpec.getName();
         if (fieldSpec.isSingleValueField()) {
-          Assert.assertEquals(actualRecord.getValue(fieldSpecName).toString(),
-              expectedRecord.get(fieldSpecName).toString());
+          Assert.assertEquals(actualRecord.getValue(fieldSpecName).toString(), expectedRecord.get(fieldSpecName).toString());
         } else {
           Object[] actualRecords = (Object[]) actualRecord.getValue(fieldSpecName);
           List expectedRecords = (List) expectedRecord.get(fieldSpecName);
diff --git a/pinot-plugins/pinot-input-format/pinot-orc/pom.xml b/pinot-plugins/pinot-input-format/pinot-orc/pom.xml
index 4b5c591..bb39008 100644
--- a/pinot-plugins/pinot-input-format/pinot-orc/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-orc/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-orc/src/main/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReader.java b/pinot-plugins/pinot-input-format/pinot-orc/src/main/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReader.java
index 7eea428..06d1f15 100644
--- a/pinot-plugins/pinot-input-format/pinot-orc/src/main/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReader.java
+++ b/pinot-plugins/pinot-input-format/pinot-orc/src/main/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReader.java
@@ -76,11 +76,10 @@ public class ORCRecordReader implements RecordReader {
   public void init(File dataFile, @Nullable Set<String> fieldsToRead, @Nullable RecordReaderConfig recordReaderConfig)
       throws IOException {
     Configuration configuration = new Configuration();
-    Reader orcReader = OrcFile.createReader(new Path(dataFile.getAbsolutePath()),
-        OrcFile.readerOptions(configuration).filesystem(FileSystem.getLocal(configuration)));
+    Reader orcReader =
+        OrcFile.createReader(new Path(dataFile.getAbsolutePath()), OrcFile.readerOptions(configuration).filesystem(FileSystem.getLocal(configuration)));
     TypeDescription orcSchema = orcReader.getSchema();
-    Preconditions
-        .checkState(orcSchema.getCategory() == TypeDescription.Category.STRUCT, "ORC schema must be of type: STRUCT");
+    Preconditions.checkState(orcSchema.getCategory() == TypeDescription.Category.STRUCT, "ORC schema must be of type: STRUCT");
     _orcFields = orcSchema.getFieldNames();
     _orcFieldTypes = orcSchema.getChildren();
 
@@ -127,8 +126,7 @@ public class ORCRecordReader implements RecordReader {
       // Maps always have two child columns for its keys and values
       List<TypeDescription> children = fieldType.getChildren();
       TypeDescription.Category keyCategory = children.get(0).getCategory();
-      Preconditions.checkState(isSupportedSingleValueType(keyCategory),
-          "Illegal map key field type: %s (field %s)", keyCategory, field);
+      Preconditions.checkState(isSupportedSingleValueType(keyCategory), "Illegal map key field type: %s (field %s)", keyCategory, field);
       initFieldsToRead(orcReaderInclude, children.get(1), field);
     } else if (category == TypeDescription.Category.STRUCT) {
       List<String> childrenFieldNames = fieldType.getFieldNames();
@@ -139,8 +137,7 @@ public class ORCRecordReader implements RecordReader {
       }
     } else {
       // Single-value field
-      Preconditions
-          .checkState(isSupportedSingleValueType(category), "Illegal single-value field type: %s (field %s)", category, field);
+      Preconditions.checkState(isSupportedSingleValueType(category), "Illegal single-value field type: %s (field %s)", category, field);
     }
   }
 
@@ -220,7 +217,7 @@ public class ORCRecordReader implements RecordReader {
         int length = (int) listColumnVector.lengths[rowId];
         List<Object> values = new ArrayList<>(length);
         for (int j = 0; j < length; j++) {
-          Object value = extractValue(field, listColumnVector.child, childType,offset + j);
+          Object value = extractValue(field, listColumnVector.child, childType, offset + j);
           // NOTE: Only keep non-null values
           if (value != null) {
             values.add(value);
@@ -268,8 +265,7 @@ public class ORCRecordReader implements RecordReader {
 
         Map<Object, Object> convertedMap = new HashMap<>();
         for (int i = 0; i < childrenFieldNames.size(); i++) {
-          convertedMap.put(childrenFieldNames.get(i),
-              extractValue(childrenFieldNames.get(i), structColumnVector.fields[i], childrenFieldTypes.get(i), rowId));
+          convertedMap.put(childrenFieldNames.get(i), extractValue(childrenFieldNames.get(i), structColumnVector.fields[i], childrenFieldTypes.get(i), rowId));
         }
         return convertedMap;
       } else {
@@ -281,7 +277,6 @@ public class ORCRecordReader implements RecordReader {
     }
   }
 
-
   @Nullable
   private static Object extractSingleValue(String field, ColumnVector columnVector, int rowId, TypeDescription.Category category) {
     if (columnVector.isRepeating) {
diff --git a/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordExtractorTest.java b/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordExtractorTest.java
index f580c09..67755a0 100644
--- a/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordExtractorTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordExtractorTest.java
@@ -68,6 +68,8 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
   @Override
   protected void createInputFile()
       throws IOException {
+    // CHECKSTYLE:OFF
+    // @format:off
     TypeDescription schema = TypeDescription.fromString(
         "struct<"
             + "userID:int,"
@@ -82,6 +84,8 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
             + "complexMap:map<string,struct<doubleField:double,stringField:string>>"
             + ">"
     );
+    // @format:on
+    // CHECKSTYLE:ON
 
     int numRecords = _inputRecords.size();
     VectorizedRowBatch rowBatch = schema.createRowBatch(numRecords);
@@ -142,8 +146,7 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
     BytesColumnVector complexMapValueBytesVector = (BytesColumnVector) complexMapValuesVector.fields[1];
     complexMapValueBytesVector.ensureSize(6, false);
 
-    Writer writer = OrcFile.createWriter(new Path(_dataFile.getAbsolutePath()),
-        OrcFile.writerOptions(new Configuration()).setSchema(schema));
+    Writer writer = OrcFile.createWriter(new Path(_dataFile.getAbsolutePath()), OrcFile.writerOptions(new Configuration()).setSchema(schema));
     for (int i = 0; i < numRecords; i++) {
       Map<String, Object> record = _inputRecords.get(i);
 
@@ -204,10 +207,8 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
       if (complexStruct != null) {
         complexStructBytesVector.setVal(i, StringUtils.encodeUtf8((String) complexStruct.get("structString")));
         // Set nested struct vector
-        complexStructIntVector.vector[i] = (Integer) ((Map<String, Object>) complexStruct.get("nestedStruct"))
-            .get("nestedStructInt");
-        complexStructLongVector.vector[i] = (Long) ((Map<String, Object>) complexStruct.get("nestedStruct"))
-            .get("nestedStructLong");
+        complexStructIntVector.vector[i] = (Integer) ((Map<String, Object>) complexStruct.get("nestedStruct")).get("nestedStructInt");
+        complexStructLongVector.vector[i] = (Long) ((Map<String, Object>) complexStruct.get("nestedStruct")).get("nestedStructLong");
       } else {
         complexStructVector.isNull[i] = true;
       }
@@ -252,38 +253,23 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
   @Override
   protected List<Map<String, Object>> getInputRecords() {
     // simple struct - contains a string, long and double array
-    Map[] simpleStructs = new Map[]{
-        null,
-        createStructInput("structString", "abc", "structLong", 1000L, "structDouble", 5.99999),
-        createStructInput("structString", "def", "structLong", 2000L, "structDouble", 6.99999),
-        createStructInput("structString", "ghi", "structLong", 3000L, "structDouble", 7.99999)
-    };
+    Map[] simpleStructs =
+        new Map[]{null, createStructInput("structString", "abc", "structLong", 1000L, "structDouble", 5.99999), createStructInput("structString", "def",
+            "structLong", 2000L, "structDouble", 6.99999), createStructInput("structString", "ghi", "structLong", 3000L, "structDouble", 7.99999)
+        };
 
     // complex struct - contains a string and nested struct of int and long
-    Map[] complexStructs = new Map[] {
-        createStructInput("structString", "abc", "nestedStruct",
-            createStructInput("nestedStructInt", 4, "nestedStructLong", 4000L)),
-        createStructInput("structString", "def", "nestedStruct",
-            createStructInput("nestedStructInt", 5, "nestedStructLong", 5000L)),
-        null,
-        createStructInput("structString", "ghi", "nestedStruct",
-            createStructInput("nestedStructInt", 6, "nestedStructLong", 6000L))
+    Map[] complexStructs = new Map[]{createStructInput("structString", "abc", "nestedStruct",
+        createStructInput("nestedStructInt", 4, "nestedStructLong", 4000L)), createStructInput("structString", "def", "nestedStruct",
+        createStructInput("nestedStructInt", 5, "nestedStructLong", 5000L)), null, createStructInput("structString", "ghi", "nestedStruct",
+        createStructInput("nestedStructInt", 6, "nestedStructLong", 6000L))
     };
 
     // complex list element - each element contains a struct of int and double
-    List[] complexLists = new List[]{
-        Arrays.asList(
-            createStructInput("complexListInt", 10, "complexListDouble", 100.0),
-            createStructInput("complexListInt", 20, "complexListDouble", 200.0)
-        ),
-        null,
-        Collections.singletonList(
-            createStructInput("complexListInt", 30, "complexListDouble", 300.0)
-        ),
-        Arrays.asList(
-            createStructInput("complexListInt", 40, "complexListDouble", 400.0),
-            createStructInput("complexListInt", 50, "complexListDouble", 500.0)
-        )
+    List[] complexLists = new List[]{Arrays.asList(createStructInput("complexListInt", 10, "complexListDouble", 100.0),
+        createStructInput("complexListInt", 20, "complexListDouble", 200.0)), null, Collections.singletonList(
+        createStructInput("complexListInt", 30, "complexListDouble", 300.0)), Arrays.asList(createStructInput("complexListInt", 40, "complexListDouble", 400.0),
+        createStructInput("complexListInt", 50, "complexListDouble", 500.0))
     };
 
     // single value integer
@@ -302,26 +288,14 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
     long[] timestamp = new long[]{1570863600000L, 1571036400000L, 1571900400000L, 1574000000000L};
 
     // simple map with string keys and integer values
-    Map[] simpleMaps = new Map[]{
-        createStructInput("key1", 10, "key2", 20),
-        null,
-        createStructInput("key3", 30),
-        createStructInput("key4", 40, "key5", 50)
+    Map[] simpleMaps = new Map[]{createStructInput("key1", 10, "key2", 20), null, createStructInput("key3", 30), createStructInput("key4", 40, "key5", 50)
     };
 
     // complex map with struct values - struct contains double and string
-    Map[] complexMap = new Map[] {
-        createStructInput("key1", createStructInput("doubleField", 2.0, "stringField", "abc")),
-        null,
-        createStructInput(
-            "key1", createStructInput("doubleField", 3.0, "stringField", "xyz"),
-            "key2", createStructInput("doubleField", 4.0, "stringField", "abc123")
-        ),
-        createStructInput(
-            "key1", createStructInput("doubleField", 3.0, "stringField", "xyz"),
-            "key2", createStructInput("doubleField", 4.0, "stringField", "abc123"),
-            "key3", createStructInput("doubleField", 4.0, "stringField", "asdf")
-        )
+    Map[] complexMap = new Map[]{createStructInput("key1", createStructInput("doubleField", 2.0, "stringField", "abc")), null, createStructInput("key1",
+        createStructInput("doubleField", 3.0, "stringField", "xyz"), "key2", createStructInput("doubleField", 4.0, "stringField", "abc123")), createStructInput(
+        "key1", createStructInput("doubleField", 3.0, "stringField", "xyz"), "key2", createStructInput("doubleField", 4.0, "stringField", "abc123"), "key3",
+        createStructInput("doubleField", 4.0, "stringField", "asdf"))
     };
 
     List<Map<String, Object>> inputRecords = new ArrayList<>(4);
@@ -345,8 +319,7 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
 
   @Override
   protected Set<String> getSourceFields() {
-    return Sets.newHashSet("userID", "firstName", "bids", "cost",
-        "timestamp", "simpleMap", "simpleStruct", "complexStruct", "complexList", "complexMap");
+    return Sets.newHashSet("userID", "firstName", "bids", "cost", "timestamp", "simpleMap", "simpleStruct", "complexStruct", "complexList", "complexMap");
   }
 
   private Map<String, Object> createStructInput(String fieldName1, Object value1) {
@@ -362,8 +335,7 @@ public class ORCRecordExtractorTest extends AbstractRecordExtractorTest {
     return struct;
   }
 
-  private Map<String, Object> createStructInput(String fieldName1, Object value1, String fieldName2, Object value2,
-      String fieldName3, Object value3) {
+  private Map<String, Object> createStructInput(String fieldName1, Object value1, String fieldName2, Object value2, String fieldName3, Object value3) {
     Map<String, Object> struct = new HashMap<>(3);
     struct.put(fieldName1, value1);
     struct.put(fieldName2, value2);
diff --git a/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReaderTest.java b/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReaderTest.java
index 69c5966..54aaf7c 100644
--- a/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReaderTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-orc/src/test/java/org/apache/pinot/plugin/inputformat/orc/ORCRecordReaderTest.java
@@ -51,10 +51,10 @@ public class ORCRecordReaderTest extends AbstractRecordReaderTest {
   @Override
   protected void writeRecordsToFile(List<Map<String, Object>> recordsToWrite)
       throws Exception {
-    TypeDescription schema = TypeDescription.fromString(
-        "struct<dim_sv_int:int,dim_sv_long:bigint,dim_sv_float:float,dim_sv_double:double,dim_sv_string:string,dim_mv_int:array<int>,dim_mv_long:array<bigint>,dim_mv_float:array<float>,dim_mv_double:array<double>,dim_mv_string:array<string>,met_int:int,met_long:bigint,met_float:float,met_double:double,extra_field:struct<f1:int,f2:int>>");
-    Writer writer = OrcFile.createWriter(new Path(_dataFile.getAbsolutePath()),
-        OrcFile.writerOptions(new Configuration()).setSchema(schema));
+    TypeDescription schema = TypeDescription.fromString("struct<dim_sv_int:int,dim_sv_long:bigint,dim_sv_float:float,dim_sv_double:double,dim_sv_string:string,"
+        + "dim_mv_int:array<int>,dim_mv_long:array<bigint>,dim_mv_float:array<float>,dim_mv_double:array<double>,"
+        + "dim_mv_string:array<string>,met_int:int,met_long:bigint,met_float:float,met_double:double,extra_field:struct<f1:int,f2:int>>");
+    Writer writer = OrcFile.createWriter(new Path(_dataFile.getAbsolutePath()), OrcFile.writerOptions(new Configuration()).setSchema(schema));
 
     VectorizedRowBatch rowBatch = schema.createRowBatch();
     int numRowsPerBatch = rowBatch.getMaxSize();
diff --git a/pinot-plugins/pinot-input-format/pinot-parquet/pom.xml b/pinot-plugins/pinot-input-format/pinot-parquet/pom.xml
index 1cd7c4b..3168c3e 100644
--- a/pinot-plugins/pinot-input-format/pinot-parquet/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-parquet/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordExtractor.java b/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordExtractor.java
index 46b989e..d89236e 100644
--- a/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordExtractor.java
+++ b/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordExtractor.java
@@ -162,19 +162,20 @@ public class ParquetNativeRecordExtractor extends BaseRecordExtractor<Group> {
           }
           if (originalType == OriginalType.DECIMAL) {
             DecimalMetadata decimalMetadata = fieldType.asPrimitiveType().getDecimalMetadata();
-            return binaryToDecimal(from.getBinary(fieldIndex, index), decimalMetadata.getPrecision(),
-                decimalMetadata.getScale());
+            return binaryToDecimal(from.getBinary(fieldIndex, index), decimalMetadata.getPrecision(), decimalMetadata.getScale());
           }
           return from.getBinary(fieldIndex, index).getBytes();
         case INT96:
           Binary int96 = from.getInt96(fieldIndex, index);
           ByteBuffer buf = ByteBuffer.wrap(int96.getBytes()).order(ByteOrder.LITTLE_ENDIAN);
-          long dateTime = (buf.getInt(8) - JULIAN_DAY_NUMBER_FOR_UNIX_EPOCH) * DateTimeConstants.MILLIS_PER_DAY
-              + buf.getLong(0) / NANOS_PER_MILLISECOND;
+          long dateTime = (buf.getInt(8) - JULIAN_DAY_NUMBER_FOR_UNIX_EPOCH) * DateTimeConstants.MILLIS_PER_DAY + buf.getLong(0) / NANOS_PER_MILLISECOND;
           return dateTime;
+        default:
+          throw new IllegalArgumentException(
+              "Unsupported field type: " + fieldType + ", primitive type: " + fieldType.asPrimitiveType().getPrimitiveTypeName());
       }
-    } else if ((fieldType.isRepetition(Type.Repetition.OPTIONAL)) || (fieldType.isRepetition(Type.Repetition.REQUIRED))
-        || (fieldType.isRepetition(Type.Repetition.REPEATED))) {
+    } else if ((fieldType.isRepetition(Type.Repetition.OPTIONAL)) || (fieldType.isRepetition(Type.Repetition.REQUIRED)) || (fieldType
+        .isRepetition(Type.Repetition.REPEATED))) {
       Group group = from.getGroup(fieldIndex, index);
       if (originalType == OriginalType.LIST) {
         return extractList(group);
diff --git a/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordReader.java b/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordReader.java
index 3c55137..0f27284 100644
--- a/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordReader.java
+++ b/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetNativeRecordReader.java
@@ -62,9 +62,7 @@ public class ParquetNativeRecordReader implements RecordReader {
     _recordExtractor = new ParquetNativeRecordExtractor();
     _recordExtractor.init(fieldsToRead, null);
     _schema = _parquetMetadata.getFileMetaData().getSchema();
-    _parquetFileReader =
-        new ParquetFileReader(conf, _parquetMetadata.getFileMetaData(), _dataFilePath, _parquetMetadata.getBlocks(),
-            _schema.getColumns());
+    _parquetFileReader = new ParquetFileReader(conf, _parquetMetadata.getFileMetaData(), _dataFilePath, _parquetMetadata.getBlocks(), _schema.getColumns());
     _pageReadStore = _parquetFileReader.readNextRowGroup();
     _columnIO = new ColumnIOFactory().getColumnIO(_schema);
     _parquetRecordReader = _columnIO.getRecordReader(_pageReadStore, new GroupRecordConverter(_schema));
@@ -113,9 +111,7 @@ public class ParquetNativeRecordReader implements RecordReader {
       throws IOException {
     _parquetFileReader.close();
     Configuration conf = new Configuration();
-    _parquetFileReader =
-        new ParquetFileReader(conf, _parquetMetadata.getFileMetaData(), _dataFilePath, _parquetMetadata.getBlocks(),
-            _schema.getColumns());
+    _parquetFileReader = new ParquetFileReader(conf, _parquetMetadata.getFileMetaData(), _dataFilePath, _parquetMetadata.getBlocks(), _schema.getColumns());
     _pageReadStore = _parquetFileReader.readNextRowGroup();
     _parquetRecordReader = _columnIO.getRecordReader(_pageReadStore, new GroupRecordConverter(_schema));
     _currentPageIdx = 0;
diff --git a/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetUtils.java b/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetUtils.java
index 5f3dd81..87a72d2 100644
--- a/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetUtils.java
+++ b/pinot-plugins/pinot-input-format/pinot-parquet/src/main/java/org/apache/pinot/plugin/inputformat/parquet/ParquetUtils.java
@@ -48,8 +48,8 @@ public class ParquetUtils {
   public static ParquetReader<GenericRecord> getParquetAvroReader(Path path)
       throws IOException {
     //noinspection unchecked
-    return AvroParquetReader.<GenericRecord>builder(path).disableCompatibility().withDataModel(GenericData.get())
-        .withConf(getParquetAvroReaderConfiguration()).build();
+    return AvroParquetReader.<GenericRecord>builder(path).disableCompatibility().withDataModel(GenericData.get()).withConf(getParquetAvroReaderConfiguration())
+        .build();
   }
 
   /**
@@ -57,8 +57,7 @@ public class ParquetUtils {
    */
   public static ParquetWriter<GenericRecord> getParquetAvroWriter(Path path, Schema schema)
       throws IOException {
-    return AvroParquetWriter.<GenericRecord>builder(path).withSchema(schema)
-        .withConf(getParquetAvroReaderConfiguration()).build();
+    return AvroParquetWriter.<GenericRecord>builder(path).withSchema(schema).withConf(getParquetAvroReaderConfiguration()).build();
   }
 
   /**
@@ -66,8 +65,7 @@ public class ParquetUtils {
    */
   public static Schema getParquetAvroSchema(Path path)
       throws IOException {
-    ParquetMetadata footer =
-        ParquetFileReader.readFooter(getParquetAvroReaderConfiguration(), path, ParquetMetadataConverter.NO_FILTER);
+    ParquetMetadata footer = ParquetFileReader.readFooter(getParquetAvroReaderConfiguration(), path, ParquetMetadataConverter.NO_FILTER);
     Map<String, String> metaData = footer.getFileMetaData().getKeyValueMetaData();
     String schemaString = metaData.get("parquet.avro.schema");
     if (schemaString == null) {
diff --git a/pinot-plugins/pinot-input-format/pinot-parquet/src/test/java/org/apache/pinot/plugin/inputformat/parquet/ParquetRecordReaderTest.java b/pinot-plugins/pinot-input-format/pinot-parquet/src/test/java/org/apache/pinot/plugin/inputformat/parquet/ParquetRecordReaderTest.java
index f39df45..45f4a81 100644
--- a/pinot-plugins/pinot-input-format/pinot-parquet/src/test/java/org/apache/pinot/plugin/inputformat/parquet/ParquetRecordReaderTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-parquet/src/test/java/org/apache/pinot/plugin/inputformat/parquet/ParquetRecordReaderTest.java
@@ -63,8 +63,7 @@ public class ParquetRecordReaderTest extends AbstractRecordReaderTest {
       }
       records.add(record);
     }
-    try (ParquetWriter<GenericRecord> writer = ParquetUtils
-        .getParquetAvroWriter(new Path(_dataFile.getAbsolutePath()), schema)) {
+    try (ParquetWriter<GenericRecord> writer = ParquetUtils.getParquetAvroWriter(new Path(_dataFile.getAbsolutePath()), schema)) {
       for (GenericRecord record : records) {
         writer.write(record);
       }
@@ -135,7 +134,6 @@ public class ParquetRecordReaderTest extends AbstractRecordReaderTest {
       Assert.assertTrue(avroReaderRow.equals(nativeReaderRow));
       recordsRead++;
     }
-    Assert.assertEquals(recordsRead, totalRecords,
-        "Message read from ParquetRecordReader doesn't match the expected number.");
+    Assert.assertEquals(recordsRead, totalRecords, "Message read from ParquetRecordReader doesn't match the expected number.");
   }
 }
diff --git a/pinot-plugins/pinot-input-format/pinot-protobuf/pom.xml b/pinot-plugins/pinot-input-format/pinot-protobuf/pom.xml
index ceefdd3..73a98cd 100644
--- a/pinot-plugins/pinot-input-format/pinot-protobuf/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-protobuf/pom.xml
@@ -37,9 +37,6 @@
     <pinot.root>${basedir}/../../..</pinot.root>
     <proto.version>3.11.4</proto.version>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufFieldInfo.java b/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufFieldInfo.java
index 5cbcef6..7e610e3 100644
--- a/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufFieldInfo.java
+++ b/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufFieldInfo.java
@@ -29,7 +29,7 @@ public class ProtoBufFieldInfo {
   private Object _fieldValue;
   private Descriptors.FieldDescriptor _fieldDescriptor;
 
-  public ProtoBufFieldInfo(Object fieldValue,  Descriptors.FieldDescriptor fieldDescriptor) {
+  public ProtoBufFieldInfo(Object fieldValue, Descriptors.FieldDescriptor fieldDescriptor) {
     _fieldValue = fieldValue;
     _fieldDescriptor = fieldDescriptor;
   }
diff --git a/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractor.java b/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractor.java
index 17cb445..b723755 100644
--- a/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractor.java
+++ b/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractor.java
@@ -91,8 +91,7 @@ public class ProtoBufRecordExtractor extends BaseRecordExtractor<Message> {
   @Override
   protected boolean isMultiValue(Object value) {
     ProtoBufFieldInfo protoBufFieldInfo = (ProtoBufFieldInfo) value;
-    return protoBufFieldInfo.getFieldValue() instanceof Collection && !protoBufFieldInfo.getFieldDescriptor()
-        .isMapField();
+    return protoBufFieldInfo.getFieldValue() instanceof Collection && !protoBufFieldInfo.getFieldDescriptor().isMapField();
   }
 
   /**
@@ -101,8 +100,7 @@ public class ProtoBufRecordExtractor extends BaseRecordExtractor<Message> {
   @Override
   protected boolean isMap(Object value) {
     ProtoBufFieldInfo protoBufFieldInfo = (ProtoBufFieldInfo) value;
-    return protoBufFieldInfo.getFieldValue() instanceof Collection && protoBufFieldInfo.getFieldDescriptor()
-        .isMapField();
+    return protoBufFieldInfo.getFieldValue() instanceof Collection && protoBufFieldInfo.getFieldDescriptor().isMapField();
   }
 
   /**
@@ -120,8 +118,7 @@ public class ProtoBufRecordExtractor extends BaseRecordExtractor<Message> {
       return null;
     }
 
-    List<Descriptors.FieldDescriptor> fieldDescriptors =
-        protoBufFieldInfo.getFieldDescriptor().getMessageType().getFields();
+    List<Descriptors.FieldDescriptor> fieldDescriptors = protoBufFieldInfo.getFieldDescriptor().getMessageType().getFields();
     Descriptors.FieldDescriptor keyFieldDescriptor = fieldDescriptors.get(0);
     Descriptors.FieldDescriptor valueFieldDescriptor = fieldDescriptors.get(1);
     Map<Object, Object> convertedMap = new HashMap<>();
@@ -135,8 +132,7 @@ public class ProtoBufRecordExtractor extends BaseRecordExtractor<Message> {
         }
 
         if (convertedFieldValue != null) {
-          convertedMap
-              .put(convertSingleValue(new ProtoBufFieldInfo(fieldKey, keyFieldDescriptor)), convertedFieldValue);
+          convertedMap.put(convertSingleValue(new ProtoBufFieldInfo(fieldKey, keyFieldDescriptor)), convertedFieldValue);
         }
       }
     }
diff --git a/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReader.java b/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReader.java
index 1d9b8d5..db0ed16 100644
--- a/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReader.java
+++ b/pinot-plugins/pinot-input-format/pinot-protobuf/src/main/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReader.java
@@ -79,8 +79,7 @@ public class ProtoBufRecordReader implements RecordReader {
       throws IOException {
     try {
       DescriptorProtos.FileDescriptorSet set = DescriptorProtos.FileDescriptorSet.parseFrom(fin);
-      Descriptors.FileDescriptor fileDescriptor =
-          Descriptors.FileDescriptor.buildFrom(set.getFile(0), new Descriptors.FileDescriptor[]{});
+      Descriptors.FileDescriptor fileDescriptor = Descriptors.FileDescriptor.buildFrom(set.getFile(0), new Descriptors.FileDescriptor[]{});
       return fileDescriptor.getMessageTypes().get(0);
     } catch (Descriptors.DescriptorValidationException e) {
       throw new IOException("Descriptor file validation failed", e);
diff --git a/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractorTest.java b/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractorTest.java
index 38c7190..2ca5525 100644
--- a/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractorTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordExtractorTest.java
@@ -67,8 +67,8 @@ public class ProtoBufRecordExtractorTest extends AbstractRecordExtractorTest {
 
   @Override
   protected Set<String> getSourceFields() {
-    return Sets.newHashSet(STRING_FIELD, INT_FIELD, LONG_FIELD, DOUBLE_FIELD, FLOAT_FIELD, BOOL_FIELD, BYTES_FIELD,
-        REPEATED_STRINGS, NESTED_MESSAGE, REPEATED_NESTED_MESSAGES, COMPLEX_MAP, SIMPLE_MAP, ENUM_FIELD);
+    return Sets.newHashSet(STRING_FIELD, INT_FIELD, LONG_FIELD, DOUBLE_FIELD, FLOAT_FIELD, BOOL_FIELD, BYTES_FIELD, REPEATED_STRINGS, NESTED_MESSAGE,
+        REPEATED_NESTED_MESSAGES, COMPLEX_MAP, SIMPLE_MAP, ENUM_FIELD);
   }
 
   /**
@@ -105,8 +105,7 @@ public class ProtoBufRecordExtractorTest extends AbstractRecordExtractorTest {
         messageBuilder.addRepeatedNestedMessages(createNestedMessage(nestedMessage));
       }
 
-      Map<String, Map<String, Object>> complexMapValues =
-          (Map<String, Map<String, Object>>) inputRecord.get(COMPLEX_MAP);
+      Map<String, Map<String, Object>> complexMapValues = (Map<String, Map<String, Object>>) inputRecord.get(COMPLEX_MAP);
       for (Map.Entry<String, Map<String, Object>> mapEntry : complexMapValues.entrySet()) {
         messageBuilder.putComplexMap(mapEntry.getKey(), createNestedMessage(mapEntry.getValue()));
       }
@@ -121,10 +120,8 @@ public class ProtoBufRecordExtractorTest extends AbstractRecordExtractorTest {
 
   private ComplexTypes.TestMessage.NestedMessage createNestedMessage(Map<String, Object> nestedMessageFields) {
     ComplexTypes.TestMessage.NestedMessage.Builder nestedMessage = ComplexTypes.TestMessage.NestedMessage.newBuilder();
-    return nestedMessage
-        .setNestedIntField((Integer) nestedMessageFields.get(NESTED_INT_FIELD))
-        .setNestedStringField((String) nestedMessageFields.get(NESTED_STRING_FIELD))
-        .build();
+    return nestedMessage.setNestedIntField((Integer) nestedMessageFields.get(NESTED_INT_FIELD))
+        .setNestedStringField((String) nestedMessageFields.get(NESTED_STRING_FIELD)).build();
   }
 
   private Map<String, Object> createRecord1() {
@@ -138,17 +135,10 @@ public class ProtoBufRecordExtractorTest extends AbstractRecordExtractorTest {
     record.put(BYTES_FIELD, StringUtils.encodeUtf8("hello world!"));
     record.put(REPEATED_STRINGS, Arrays.asList("aaa", "bbb", "ccc"));
     record.put(NESTED_MESSAGE, getNestedMap(NESTED_STRING_FIELD, "ice cream", NESTED_INT_FIELD, 9));
-    record.put(REPEATED_NESTED_MESSAGES, Arrays.asList(
-        getNestedMap(NESTED_STRING_FIELD, "vanilla", NESTED_INT_FIELD, 3),
-        getNestedMap(NESTED_STRING_FIELD, "chocolate", NESTED_INT_FIELD, 5)
-    ));
-    record.put(
-        COMPLEX_MAP,
-        getNestedMap(
-            "fruit1", getNestedMap(NESTED_STRING_FIELD, "apple", NESTED_INT_FIELD, 1),
-            "fruit2", getNestedMap(NESTED_STRING_FIELD, "orange", NESTED_INT_FIELD, 2)
-        )
-    );
+    record.put(REPEATED_NESTED_MESSAGES,
+        Arrays.asList(getNestedMap(NESTED_STRING_FIELD, "vanilla", NESTED_INT_FIELD, 3), getNestedMap(NESTED_STRING_FIELD, "chocolate", NESTED_INT_FIELD, 5)));
+    record.put(COMPLEX_MAP, getNestedMap("fruit1", getNestedMap(NESTED_STRING_FIELD, "apple", NESTED_INT_FIELD, 1), "fruit2",
+        getNestedMap(NESTED_STRING_FIELD, "orange", NESTED_INT_FIELD, 2)));
     record.put(SIMPLE_MAP, getNestedMap("Tuesday", 3, "Wednesday", 4));
     record.put(ENUM_FIELD, "GAMMA");
     return record;
@@ -165,17 +155,10 @@ public class ProtoBufRecordExtractorTest extends AbstractRecordExtractorTest {
     record.put(BYTES_FIELD, StringUtils.encodeUtf8("goodbye world!"));
     record.put(REPEATED_STRINGS, Arrays.asList("ddd", "eee", "fff"));
     record.put(NESTED_MESSAGE, getNestedMap(NESTED_STRING_FIELD, "Starbucks", NESTED_INT_FIELD, 100));
-    record.put(REPEATED_NESTED_MESSAGES, Arrays.asList(
-        getNestedMap(NESTED_STRING_FIELD, "coffee", NESTED_INT_FIELD, 10),
-        getNestedMap(NESTED_STRING_FIELD, "tea", NESTED_INT_FIELD, 20)
-    ));
-    record.put(
-        COMPLEX_MAP,
-        getNestedMap(
-            "food3", getNestedMap(NESTED_STRING_FIELD, "pizza", NESTED_INT_FIELD, 1),
-            "food4", getNestedMap(NESTED_STRING_FIELD, "hamburger", NESTED_INT_FIELD, 2)
-        )
-    );
+    record.put(REPEATED_NESTED_MESSAGES,
+        Arrays.asList(getNestedMap(NESTED_STRING_FIELD, "coffee", NESTED_INT_FIELD, 10), getNestedMap(NESTED_STRING_FIELD, "tea", NESTED_INT_FIELD, 20)));
+    record.put(COMPLEX_MAP, getNestedMap("food3", getNestedMap(NESTED_STRING_FIELD, "pizza", NESTED_INT_FIELD, 1), "food4",
+        getNestedMap(NESTED_STRING_FIELD, "hamburger", NESTED_INT_FIELD, 2)));
     record.put(SIMPLE_MAP, getNestedMap("Sunday", 1, "Monday", 2));
     record.put(ENUM_FIELD, "BETA");
     return record;
diff --git a/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReaderTest.java b/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReaderTest.java
index ce9cd1b..45db584 100644
--- a/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReaderTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/java/org/apache/pinot/plugin/inputformat/protobuf/ProtoBufRecordReaderTest.java
@@ -50,10 +50,8 @@ public class ProtoBufRecordReaderTest extends AbstractRecordReaderTest {
 
   @Override
   protected Schema getPinotSchema() {
-    return new Schema.SchemaBuilder().setSchemaName("SampleRecord")
-        .addSingleValueDimension("id", FieldSpec.DataType.INT)
-        .addSingleValueDimension("name", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("email", FieldSpec.DataType.STRING)
+    return new Schema.SchemaBuilder().setSchemaName("SampleRecord").addSingleValueDimension("id", FieldSpec.DataType.INT)
+        .addSingleValueDimension("name", FieldSpec.DataType.STRING).addSingleValueDimension("email", FieldSpec.DataType.STRING)
         .addMultiValueDimension("friends", FieldSpec.DataType.STRING).build();
   }
 
@@ -145,8 +143,8 @@ public class ProtoBufRecordReaderTest extends AbstractRecordReaderTest {
     List<Sample.SampleRecord> lists = new ArrayList<>();
     for (Map<String, Object> record : recordsToWrite) {
       Sample.SampleRecord sampleRecord =
-          Sample.SampleRecord.newBuilder().setEmail((String) record.get("email")).setName((String) record.get("name"))
-              .setId((Integer) record.get("id")).addAllFriends((List<String>) record.get("friends")).build();
+          Sample.SampleRecord.newBuilder().setEmail((String) record.get("email")).setName((String) record.get("name")).setId((Integer) record.get("id"))
+              .addAllFriends((List<String>) record.get("friends")).build();
 
       lists.add(sampleRecord);
     }
diff --git a/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/resources/log4j2.xml b/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/resources/log4j2.xml
index 4bbc67f..f0d887a 100644
--- a/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/resources/log4j2.xml
+++ b/pinot-plugins/pinot-input-format/pinot-protobuf/src/test/resources/log4j2.xml
@@ -29,7 +29,7 @@
   </Appenders>
   <Loggers>
     <AsyncRoot level="warn" additivity="false">
-      <AppenderRef ref="console" />
+      <AppenderRef ref="console"/>
     </AsyncRoot>
   </Loggers>
 </Configuration>
diff --git a/pinot-plugins/pinot-input-format/pinot-thrift/pom.xml b/pinot-plugins/pinot-input-format/pinot-thrift/pom.xml
index 53eed99..77fb7a6 100644
--- a/pinot-plugins/pinot-input-format/pinot-thrift/pom.xml
+++ b/pinot-plugins/pinot-input-format/pinot-thrift/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractor.java b/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractor.java
index 7760921..d10557d 100644
--- a/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractor.java
+++ b/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractor.java
@@ -97,7 +97,7 @@ public class ThriftRecordExtractor extends BaseRecordExtractor<TBase> {
   protected Object convertRecord(Object value) {
     TBase record = (TBase) value;
     Map<Object, Object> convertedRecord = new HashMap<>();
-    for (TFieldIdEnum tFieldIdEnum: FieldMetaData.getStructMetaDataMap(record.getClass()).keySet()) {
+    for (TFieldIdEnum tFieldIdEnum : FieldMetaData.getStructMetaDataMap(record.getClass()).keySet()) {
       Object fieldValue = record.getFieldValue(tFieldIdEnum);
       if (fieldValue != null) {
         fieldValue = convert(fieldValue);
diff --git a/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReader.java b/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReader.java
index 2fcef1b..eda15e9 100644
--- a/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReader.java
+++ b/pinot-plugins/pinot-input-format/pinot-thrift/src/main/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReader.java
@@ -63,8 +63,7 @@ public class ThriftRecordReader implements RecordReader {
       throw new RuntimeException(e);
     }
 
-    Map<? extends TFieldIdEnum, org.apache.thrift.meta_data.FieldMetaData> metaDataMap =
-        FieldMetaData.getStructMetaDataMap(tObject.getClass());
+    Map<? extends TFieldIdEnum, org.apache.thrift.meta_data.FieldMetaData> metaDataMap = FieldMetaData.getStructMetaDataMap(tObject.getClass());
     for (TFieldIdEnum tFieldIdEnum : metaDataMap.keySet()) {
       _fieldIds.put(tFieldIdEnum.getFieldName(), Short.toUnsignedInt(tFieldIdEnum.getThriftFieldId()));
     }
diff --git a/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractorTest.java b/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractorTest.java
index 28d6c65..33b6003 100644
--- a/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractorTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordExtractorTest.java
@@ -65,8 +65,8 @@ public class ThriftRecordExtractorTest extends AbstractRecordExtractorTest {
 
   @Override
   protected Set<String> getSourceFields() {
-    return Sets.newHashSet(INT_FIELD, LONG_FIELD, BOOL_FIELD, DOUBLE_FIELD, STRING_FIELD, ENUM_FIELD,
-        OPTIONAL_STRING_FIELD, NESTED_STRUCT_FIELD, SIMPLE_LIST, COMPLEX_LIST, SIMPLE_MAP, COMPLEX_MAP);
+    return Sets.newHashSet(INT_FIELD, LONG_FIELD, BOOL_FIELD, DOUBLE_FIELD, STRING_FIELD, ENUM_FIELD, OPTIONAL_STRING_FIELD, NESTED_STRUCT_FIELD, SIMPLE_LIST,
+        COMPLEX_LIST, SIMPLE_MAP, COMPLEX_MAP);
   }
 
   /**
@@ -100,17 +100,13 @@ public class ThriftRecordExtractorTest extends AbstractRecordExtractorTest {
       thriftRecord.setLongField((long) inputRecord.get(LONG_FIELD));
 
       Map<String, Object> nestedStructValues = (Map<String, Object>) inputRecord.get(NESTED_STRUCT_FIELD);
-      thriftRecord.setNestedStructField(createNestedType(
-          (String) nestedStructValues.get(NESTED_STRING_FIELD),
-          (int) nestedStructValues.get(NESTED_INT_FIELD))
-      );
+      thriftRecord.setNestedStructField(createNestedType((String) nestedStructValues.get(NESTED_STRING_FIELD), (int) nestedStructValues.get(NESTED_INT_FIELD)));
 
       thriftRecord.setSimpleListField((List<String>) inputRecord.get(SIMPLE_LIST));
 
       List<NestedType> nestedTypeList = new ArrayList<>();
       for (Map element : (List<Map>) inputRecord.get(COMPLEX_LIST)) {
-        nestedTypeList.add(createNestedType((String) element.get(NESTED_STRING_FIELD),
-            (Integer) element.get(NESTED_INT_FIELD)));
+        nestedTypeList.add(createNestedType((String) element.get(NESTED_STRING_FIELD), (Integer) element.get(NESTED_INT_FIELD)));
       }
 
       thriftRecord.setComplexListField(nestedTypeList);
@@ -121,11 +117,8 @@ public class ThriftRecordExtractorTest extends AbstractRecordExtractorTest {
       thriftRecord.setSimpleMapField((Map<String, Integer>) inputRecord.get(SIMPLE_MAP));
 
       Map<String, NestedType> complexMap = new HashMap<>();
-      for (Map.Entry<String, Map<String, Object>> entry :
-          ((Map<String, Map<String, Object>>) inputRecord.get(COMPLEX_MAP)).entrySet()) {
-        complexMap.put(entry.getKey(), createNestedType(
-            (String) entry.getValue().get(NESTED_STRING_FIELD),
-            (int) entry.getValue().get(NESTED_INT_FIELD)));
+      for (Map.Entry<String, Map<String, Object>> entry : ((Map<String, Map<String, Object>>) inputRecord.get(COMPLEX_MAP)).entrySet()) {
+        complexMap.put(entry.getKey(), createNestedType((String) entry.getValue().get(NESTED_STRING_FIELD), (int) entry.getValue().get(NESTED_INT_FIELD)));
       }
       thriftRecord.setComplexMapField(complexMap);
       thriftRecords.add(thriftRecord);
@@ -153,21 +146,12 @@ public class ThriftRecordExtractorTest extends AbstractRecordExtractorTest {
     record.put(ENUM_FIELD, TestEnum.DELTA.toString());
     record.put(NESTED_STRUCT_FIELD, createNestedMap(NESTED_STRING_FIELD, "ice cream", NESTED_INT_FIELD, 5));
     record.put(SIMPLE_LIST, Arrays.asList("aaa", "bbb", "ccc"));
-    record.put(COMPLEX_LIST,
-        Arrays.asList(
-            createNestedMap(NESTED_STRING_FIELD, "hows", NESTED_INT_FIELD, 10),
-            createNestedMap(NESTED_STRING_FIELD, "it", NESTED_INT_FIELD, 20),
-            createNestedMap(NESTED_STRING_FIELD, "going", NESTED_INT_FIELD, 30)
-        )
-    );
+    record.put(COMPLEX_LIST, Arrays
+        .asList(createNestedMap(NESTED_STRING_FIELD, "hows", NESTED_INT_FIELD, 10), createNestedMap(NESTED_STRING_FIELD, "it", NESTED_INT_FIELD, 20),
+            createNestedMap(NESTED_STRING_FIELD, "going", NESTED_INT_FIELD, 30)));
     record.put(SIMPLE_MAP, createNestedMap("Tuesday", 3, "Wednesday", 4));
-    record.put(
-        COMPLEX_MAP,
-        createNestedMap(
-            "fruit1", createNestedMap(NESTED_STRING_FIELD, "apple", NESTED_INT_FIELD, 1),
-            "fruit2", createNestedMap(NESTED_STRING_FIELD, "orange", NESTED_INT_FIELD, 2)
-        )
-    );
+    record.put(COMPLEX_MAP, createNestedMap("fruit1", createNestedMap(NESTED_STRING_FIELD, "apple", NESTED_INT_FIELD, 1), "fruit2",
+        createNestedMap(NESTED_STRING_FIELD, "orange", NESTED_INT_FIELD, 2)));
     return record;
   }
 
@@ -181,21 +165,12 @@ public class ThriftRecordExtractorTest extends AbstractRecordExtractorTest {
     record.put(ENUM_FIELD, TestEnum.GAMMA.toString());
     record.put(NESTED_STRUCT_FIELD, createNestedMap(NESTED_STRING_FIELD, "ice cream", NESTED_INT_FIELD, 5));
     record.put(SIMPLE_LIST, Arrays.asList("aaa", "bbb", "ccc"));
-    record.put(COMPLEX_LIST,
-        Arrays.asList(
-            createNestedMap(NESTED_STRING_FIELD, "hows", NESTED_INT_FIELD, 10),
-            createNestedMap(NESTED_STRING_FIELD, "it", NESTED_INT_FIELD, 20),
-            createNestedMap(NESTED_STRING_FIELD, "going", NESTED_INT_FIELD, 30)
-        )
-    );
+    record.put(COMPLEX_LIST, Arrays
+        .asList(createNestedMap(NESTED_STRING_FIELD, "hows", NESTED_INT_FIELD, 10), createNestedMap(NESTED_STRING_FIELD, "it", NESTED_INT_FIELD, 20),
+            createNestedMap(NESTED_STRING_FIELD, "going", NESTED_INT_FIELD, 30)));
     record.put(SIMPLE_MAP, createNestedMap("Tuesday", 3, "Wednesday", 4));
-    record.put(
-        COMPLEX_MAP,
-        createNestedMap(
-            "fruit1", createNestedMap(NESTED_STRING_FIELD, "apple", NESTED_INT_FIELD, 1),
-            "fruit2", createNestedMap(NESTED_STRING_FIELD, "orange", NESTED_INT_FIELD, 2)
-        )
-    );
+    record.put(COMPLEX_MAP, createNestedMap("fruit1", createNestedMap(NESTED_STRING_FIELD, "apple", NESTED_INT_FIELD, 1), "fruit2",
+        createNestedMap(NESTED_STRING_FIELD, "orange", NESTED_INT_FIELD, 2)));
     return record;
   }
 
diff --git a/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReaderTest.java b/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReaderTest.java
index 085fbf9..e4e516e 100644
--- a/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReaderTest.java
+++ b/pinot-plugins/pinot-input-format/pinot-thrift/src/test/java/org/apache/pinot/plugin/inputformat/thrift/ThriftRecordReaderTest.java
@@ -142,12 +142,9 @@ public class ThriftRecordReaderTest {
   }
 
   private Schema getSchema() {
-    return new Schema.SchemaBuilder().setSchemaName("ThriftSampleData")
-        .addSingleValueDimension("id", FieldSpec.DataType.INT)
-        .addSingleValueDimension("name", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("created_at", FieldSpec.DataType.LONG)
-        .addSingleValueDimension("active", FieldSpec.DataType.BOOLEAN)
-        .addMultiValueDimension("groups", FieldSpec.DataType.INT)
+    return new Schema.SchemaBuilder().setSchemaName("ThriftSampleData").addSingleValueDimension("id", FieldSpec.DataType.INT)
+        .addSingleValueDimension("name", FieldSpec.DataType.STRING).addSingleValueDimension("created_at", FieldSpec.DataType.LONG)
+        .addSingleValueDimension("active", FieldSpec.DataType.BOOLEAN).addMultiValueDimension("groups", FieldSpec.DataType.INT)
         .addMultiValueDimension("set_values", FieldSpec.DataType.STRING).build();
   }
 
diff --git a/pinot-plugins/pinot-input-format/pinot-thrift/src/test/resources/log4j2.xml b/pinot-plugins/pinot-input-format/pinot-thrift/src/test/resources/log4j2.xml
index 4bbc67f..f0d887a 100644
--- a/pinot-plugins/pinot-input-format/pinot-thrift/src/test/resources/log4j2.xml
+++ b/pinot-plugins/pinot-input-format/pinot-thrift/src/test/resources/log4j2.xml
@@ -29,7 +29,7 @@
   </Appenders>
   <Loggers>
     <AsyncRoot level="warn" additivity="false">
-      <AppenderRef ref="console" />
+      <AppenderRef ref="console"/>
     </AsyncRoot>
   </Loggers>
 </Configuration>
diff --git a/pinot-plugins/pinot-metrics/pinot-dropwizard/pom.xml b/pinot-plugins/pinot-metrics/pinot-dropwizard/pom.xml
index 676e8e1..84b4e89 100644
--- a/pinot-plugins/pinot-metrics/pinot-dropwizard/pom.xml
+++ b/pinot-plugins/pinot-metrics/pinot-dropwizard/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <build>
diff --git a/pinot-plugins/pinot-metrics/pinot-yammer/pom.xml b/pinot-plugins/pinot-metrics/pinot-yammer/pom.xml
index 5fa768f..24cfcd2 100644
--- a/pinot-plugins/pinot-metrics/pinot-yammer/pom.xml
+++ b/pinot-plugins/pinot-metrics/pinot-yammer/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <build>
diff --git a/pinot-plugins/pinot-metrics/pinot-yammer/src/main/java/org/apache/pinot/plugin/metrics/yammer/YammerMetricsRegistry.java b/pinot-plugins/pinot-metrics/pinot-yammer/src/main/java/org/apache/pinot/plugin/metrics/yammer/YammerMetricsRegistry.java
index 128b06a..b7a734c 100644
--- a/pinot-plugins/pinot-metrics/pinot-yammer/src/main/java/org/apache/pinot/plugin/metrics/yammer/YammerMetricsRegistry.java
+++ b/pinot-plugins/pinot-metrics/pinot-yammer/src/main/java/org/apache/pinot/plugin/metrics/yammer/YammerMetricsRegistry.java
@@ -55,7 +55,7 @@ public class YammerMetricsRegistry implements PinotMetricsRegistry {
   }
 
   @Override
-  public <T>PinotGauge<T> newGauge(PinotMetricName name, PinotGauge<T> gauge) {
+  public <T> PinotGauge<T> newGauge(PinotMetricName name, PinotGauge<T> gauge) {
     return new YammerGauge<T>(_metricsRegistry.newGauge((MetricName) name.getMetricName(), (Gauge<T>) gauge.getGauge()));
   }
 
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/pom.xml b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/pom.xml
index 0219af4..15fcd22 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/pom.xml
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/pom.xml
@@ -35,9 +35,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>none</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <build>
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseMultipleSegmentsConversionExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseMultipleSegmentsConversionExecutor.java
index 0ef9de4..7fbda0c 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseMultipleSegmentsConversionExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseMultipleSegmentsConversionExecutor.java
@@ -66,8 +66,7 @@ public abstract class BaseMultipleSegmentsConversionExecutor extends BaseTaskExe
    * @return a list of segment conversion result
    * @throws Exception
    */
-  protected abstract List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs,
-      File workingDir)
+  protected abstract List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs, File workingDir)
       throws Exception;
 
   /**
@@ -98,8 +97,8 @@ public abstract class BaseMultipleSegmentsConversionExecutor extends BaseTaskExe
     String replaceSegmentsString = configs.get(MinionConstants.ENABLE_REPLACE_SEGMENTS_KEY);
     boolean replaceSegmentsEnabled = Boolean.parseBoolean(replaceSegmentsString);
 
-    LOGGER.info("Start executing {} on table: {}, input segments: {} with downloadURLs: {}, uploadURL: {}", taskType,
-        tableNameWithType, inputSegmentNames, downloadURLString, uploadURL);
+    LOGGER.info("Start executing {} on table: {}, input segments: {} with downloadURLs: {}, uploadURL: {}", taskType, tableNameWithType, inputSegmentNames,
+        downloadURLString, uploadURL);
 
     File tempDataDir = new File(new File(MINION_CONTEXT.getDataDir(), taskType), "tmp-" + UUID.randomUUID());
     Preconditions.checkState(tempDataDir.mkdirs());
@@ -136,8 +135,8 @@ public abstract class BaseMultipleSegmentsConversionExecutor extends BaseTaskExe
       for (SegmentConversionResult segmentConversionResult : segmentConversionResults) {
         // Tar the converted segment
         File convertedSegmentDir = segmentConversionResult.getFile();
-        File convertedSegmentTarFile = new File(convertedTarredSegmentDir,
-            segmentConversionResult.getSegmentName() + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
+        File convertedSegmentTarFile =
+            new File(convertedTarredSegmentDir, segmentConversionResult.getSegmentName() + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
         TarGzCompressionUtils.createTarGzFile(convertedSegmentDir, convertedSegmentTarFile);
         tarredSegmentFiles.add(convertedSegmentTarFile);
         if (!FileUtils.deleteQuietly(convertedSegmentDir)) {
@@ -157,19 +156,15 @@ public abstract class BaseMultipleSegmentsConversionExecutor extends BaseTaskExe
       // Check whether the task get cancelled before uploading the segment
       if (_cancelled) {
         LOGGER.info("{} on table: {}, segments: {} got cancelled", taskType, tableNameWithType, inputSegmentNames);
-        throw new TaskCancelledException(
-            taskType + " on table: " + tableNameWithType + ", segments: " + inputSegmentNames + " got cancelled");
+        throw new TaskCancelledException(taskType + " on table: " + tableNameWithType + ", segments: " + inputSegmentNames + " got cancelled");
       }
 
       // Update the segment lineage to indicate that the segment replacement is in progress.
       String lineageEntryId = null;
       if (replaceSegmentsEnabled) {
-        List<String> segmentsFrom =
-            Arrays.stream(inputSegmentNames.split(",")).map(String::trim).collect(Collectors.toList());
-        List<String> segmentsTo =
-            segmentConversionResults.stream().map(SegmentConversionResult::getSegmentName).collect(Collectors.toList());
-        lineageEntryId = SegmentConversionUtils.startSegmentReplace(tableNameWithType, uploadURL,
-            new StartReplaceSegmentsRequest(segmentsFrom, segmentsTo));
+        List<String> segmentsFrom = Arrays.stream(inputSegmentNames.split(",")).map(String::trim).collect(Collectors.toList());
+        List<String> segmentsTo = segmentConversionResults.stream().map(SegmentConversionResult::getSegmentName).collect(Collectors.toList());
+        lineageEntryId = SegmentConversionUtils.startSegmentReplace(tableNameWithType, uploadURL, new StartReplaceSegmentsRequest(segmentsFrom, segmentsTo));
       }
 
       // Upload the tarred segments
@@ -179,11 +174,9 @@ public abstract class BaseMultipleSegmentsConversionExecutor extends BaseTaskExe
         String resultSegmentName = segmentConversionResult.getSegmentName();
 
         // Set segment ZK metadata custom map modifier into HTTP header to modify the segment ZK metadata
-        SegmentZKMetadataCustomMapModifier segmentZKMetadataCustomMapModifier =
-            getSegmentZKMetadataCustomMapModifier(pinotTaskConfig, segmentConversionResult);
+        SegmentZKMetadataCustomMapModifier segmentZKMetadataCustomMapModifier = getSegmentZKMetadataCustomMapModifier(pinotTaskConfig, segmentConversionResult);
         Header segmentZKMetadataCustomMapModifierHeader =
-            new BasicHeader(FileUploadDownloadClient.CustomHeaders.SEGMENT_ZK_METADATA_CUSTOM_MAP_MODIFIER,
-                segmentZKMetadataCustomMapModifier.toJsonString());
+            new BasicHeader(FileUploadDownloadClient.CustomHeaders.SEGMENT_ZK_METADATA_CUSTOM_MAP_MODIFIER, segmentZKMetadataCustomMapModifier.toJsonString());
 
         List<Header> httpHeaders = new ArrayList<>();
         httpHeaders.add(segmentZKMetadataCustomMapModifierHeader);
@@ -192,13 +185,11 @@ public abstract class BaseMultipleSegmentsConversionExecutor extends BaseTaskExe
         // Set parameters for upload request
         NameValuePair enableParallelPushProtectionParameter =
             new BasicNameValuePair(FileUploadDownloadClient.QueryParameters.ENABLE_PARALLEL_PUSH_PROTECTION, "true");
-        NameValuePair tableNameParameter = new BasicNameValuePair(FileUploadDownloadClient.QueryParameters.TABLE_NAME,
-            TableNameBuilder.extractRawTableName(tableNameWithType));
+        NameValuePair tableNameParameter =
+            new BasicNameValuePair(FileUploadDownloadClient.QueryParameters.TABLE_NAME, TableNameBuilder.extractRawTableName(tableNameWithType));
         List<NameValuePair> parameters = Arrays.asList(enableParallelPushProtectionParameter, tableNameParameter);
 
-        SegmentConversionUtils
-            .uploadSegment(configs, httpHeaders, parameters, tableNameWithType, resultSegmentName, uploadURL,
-                convertedTarredSegmentFile);
+        SegmentConversionUtils.uploadSegment(configs, httpHeaders, parameters, tableNameWithType, resultSegmentName, uploadURL, convertedTarredSegmentFile);
         if (!FileUtils.deleteQuietly(convertedTarredSegmentFile)) {
           LOGGER.warn("Failed to delete tarred converted segment: {}", convertedTarredSegmentFile.getAbsolutePath());
         }
@@ -209,12 +200,10 @@ public abstract class BaseMultipleSegmentsConversionExecutor extends BaseTaskExe
         SegmentConversionUtils.endSegmentReplace(tableNameWithType, uploadURL, lineageEntryId);
       }
 
-      String outputSegmentNames = segmentConversionResults.stream().map(SegmentConversionResult::getSegmentName)
-          .collect(Collectors.joining(","));
+      String outputSegmentNames = segmentConversionResults.stream().map(SegmentConversionResult::getSegmentName).collect(Collectors.joining(","));
       postProcess(pinotTaskConfig);
       LOGGER
-          .info("Done executing {} on table: {}, input segments: {}, output segments: {}", taskType, tableNameWithType,
-              inputSegmentNames, outputSegmentNames);
+          .info("Done executing {} on table: {}, input segments: {}, output segments: {}", taskType, tableNameWithType, inputSegmentNames, outputSegmentNames);
 
       return segmentConversionResults;
     } finally {
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseSingleSegmentConversionExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseSingleSegmentConversionExecutor.java
index db329bd..fc59b37 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseSingleSegmentConversionExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseSingleSegmentConversionExecutor.java
@@ -72,14 +72,12 @@ public abstract class BaseSingleSegmentConversionExecutor extends BaseTaskExecut
 
     long currentSegmentCrc = getSegmentCrc(tableNameWithType, segmentName);
     if (Long.parseLong(originalSegmentCrc) != currentSegmentCrc) {
-      LOGGER.info("Segment CRC does not match, skip the task. Original CRC: {}, current CRC: {}", originalSegmentCrc,
-          currentSegmentCrc);
-      return new SegmentConversionResult.Builder().setTableNameWithType(tableNameWithType).setSegmentName(segmentName)
-          .build();
+      LOGGER.info("Segment CRC does not match, skip the task. Original CRC: {}, current CRC: {}", originalSegmentCrc, currentSegmentCrc);
+      return new SegmentConversionResult.Builder().setTableNameWithType(tableNameWithType).setSegmentName(segmentName).build();
     }
 
-    LOGGER.info("Start executing {} on table: {}, segment: {} with downloadURL: {}, uploadURL: {}", taskType,
-        tableNameWithType, segmentName, downloadURL, uploadURL);
+    LOGGER.info("Start executing {} on table: {}, segment: {} with downloadURL: {}, uploadURL: {}", taskType, tableNameWithType, segmentName, downloadURL,
+        uploadURL);
 
     File tempDataDir = new File(new File(MINION_CONTEXT.getDataDir(), taskType), "tmp-" + UUID.randomUUID());
     Preconditions.checkState(tempDataDir.mkdirs(), "Failed to create temporary directory: %s", tempDataDir);
@@ -102,14 +100,13 @@ public abstract class BaseSingleSegmentConversionExecutor extends BaseTaskExecut
       File workingDir = new File(tempDataDir, "workingDir");
       Preconditions.checkState(workingDir.mkdir());
       SegmentConversionResult segmentConversionResult = convert(pinotTaskConfig, indexDir, workingDir);
-      Preconditions.checkState(segmentConversionResult.getSegmentName().equals(segmentName),
-          "Converted segment name: %s does not match original segment name: %s",
-          segmentConversionResult.getSegmentName(), segmentName);
+      Preconditions
+          .checkState(segmentConversionResult.getSegmentName().equals(segmentName), "Converted segment name: %s does not match original segment name: %s",
+              segmentConversionResult.getSegmentName(), segmentName);
 
       // Tar the converted segment
       File convertedSegmentDir = segmentConversionResult.getFile();
-      File convertedTarredSegmentFile =
-          new File(tempDataDir, segmentName + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
+      File convertedTarredSegmentFile = new File(tempDataDir, segmentName + TarGzCompressionUtils.TAR_GZ_FILE_EXTENSION);
       TarGzCompressionUtils.createTarGzFile(convertedSegmentDir, convertedTarredSegmentFile);
       if (!FileUtils.deleteQuietly(convertedSegmentDir)) {
         LOGGER.warn("Failed to delete converted segment: {}", convertedSegmentDir.getAbsolutePath());
@@ -125,8 +122,7 @@ public abstract class BaseSingleSegmentConversionExecutor extends BaseTaskExecut
       // Check whether the task get cancelled before uploading the segment
       if (_cancelled) {
         LOGGER.info("{} on table: {}, segment: {} got cancelled", taskType, tableNameWithType, segmentName);
-        throw new TaskCancelledException(
-            taskType + " on table: " + tableNameWithType + ", segment: " + segmentName + " got cancelled");
+        throw new TaskCancelledException(taskType + " on table: " + tableNameWithType + ", segment: " + segmentName + " got cancelled");
       }
 
       // Set original segment CRC into HTTP IF-MATCH header to check whether the original segment get refreshed, so that
@@ -136,11 +132,9 @@ public abstract class BaseSingleSegmentConversionExecutor extends BaseTaskExecut
       // Set segment ZK metadata custom map modifier into HTTP header to modify the segment ZK metadata
       // NOTE: even segment is not changed, still need to upload the segment to update the segment ZK metadata so that
       // segment will not be submitted again
-      SegmentZKMetadataCustomMapModifier segmentZKMetadataCustomMapModifier =
-          getSegmentZKMetadataCustomMapModifier(pinotTaskConfig, segmentConversionResult);
+      SegmentZKMetadataCustomMapModifier segmentZKMetadataCustomMapModifier = getSegmentZKMetadataCustomMapModifier(pinotTaskConfig, segmentConversionResult);
       Header segmentZKMetadataCustomMapModifierHeader =
-          new BasicHeader(FileUploadDownloadClient.CustomHeaders.SEGMENT_ZK_METADATA_CUSTOM_MAP_MODIFIER,
-              segmentZKMetadataCustomMapModifier.toJsonString());
+          new BasicHeader(FileUploadDownloadClient.CustomHeaders.SEGMENT_ZK_METADATA_CUSTOM_MAP_MODIFIER, segmentZKMetadataCustomMapModifier.toJsonString());
 
       List<Header> httpHeaders = new ArrayList<>();
       httpHeaders.add(ifMatchHeader);
@@ -150,13 +144,12 @@ public abstract class BaseSingleSegmentConversionExecutor extends BaseTaskExecut
       // Set parameters for upload request.
       NameValuePair enableParallelPushProtectionParameter =
           new BasicNameValuePair(FileUploadDownloadClient.QueryParameters.ENABLE_PARALLEL_PUSH_PROTECTION, "true");
-      NameValuePair tableNameParameter = new BasicNameValuePair(FileUploadDownloadClient.QueryParameters.TABLE_NAME,
-          TableNameBuilder.extractRawTableName(tableNameWithType));
+      NameValuePair tableNameParameter =
+          new BasicNameValuePair(FileUploadDownloadClient.QueryParameters.TABLE_NAME, TableNameBuilder.extractRawTableName(tableNameWithType));
       List<NameValuePair> parameters = Arrays.asList(enableParallelPushProtectionParameter, tableNameParameter);
 
       // Upload the tarred segment
-      SegmentConversionUtils.uploadSegment(configs, httpHeaders, parameters, tableNameWithType, segmentName, uploadURL,
-          convertedTarredSegmentFile);
+      SegmentConversionUtils.uploadSegment(configs, httpHeaders, parameters, tableNameWithType, segmentName, uploadURL, convertedTarredSegmentFile);
       if (!FileUtils.deleteQuietly(convertedTarredSegmentFile)) {
         LOGGER.warn("Failed to delete tarred converted segment: {}", convertedTarredSegmentFile.getAbsolutePath());
       }
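
The reflowed assertions above rely on Guava's Preconditions.checkState, which accepts a message template with %s placeholders only, fills them in order, and throws IllegalStateException when the condition fails. A minimal sketch, with hypothetical segment names used purely for illustration:

    import com.google.common.base.Preconditions;

    public class CheckStateSketch {
      public static void main(String[] args) {
        // Hypothetical names, only to show how the %s template is filled in.
        String converted = "myTable_2021-08-18_0";
        String original = "myTable_2021-08-18_1";
        // Fails here with: "Converted segment name: myTable_2021-08-18_0 does not match original segment name: myTable_2021-08-18_1"
        Preconditions.checkState(converted.equals(original),
            "Converted segment name: %s does not match original segment name: %s", converted, original);
      }
    }
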
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseTaskExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseTaskExecutor.java
index d85bf44..e105306 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseTaskExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/BaseTaskExecutor.java
@@ -42,12 +42,11 @@ public abstract class BaseTaskExecutor implements PinotTaskExecutor {
   /**
    * Returns the segment ZK metadata custom map modifier.
    */
-  protected abstract SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(
-      PinotTaskConfig pinotTaskConfig, SegmentConversionResult segmentConversionResult);
+  protected abstract SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(PinotTaskConfig pinotTaskConfig,
+      SegmentConversionResult segmentConversionResult);
 
   protected TableConfig getTableConfig(String tableNameWithType) {
-    TableConfig tableConfig =
-        ZKMetadataProvider.getTableConfig(MINION_CONTEXT.getHelixPropertyStore(), tableNameWithType);
+    TableConfig tableConfig = ZKMetadataProvider.getTableConfig(MINION_CONTEXT.getHelixPropertyStore(), tableNameWithType);
     Preconditions.checkState(tableConfig != null, "Failed to find table config for table: %s", tableNameWithType);
     return tableConfig;
   }
@@ -59,8 +58,7 @@ public abstract class BaseTaskExecutor implements PinotTaskExecutor {
   }
 
   protected long getSegmentCrc(String tableNameWithType, String segmentName) {
-    SegmentZKMetadata segmentZKMetadata =
-        ZKMetadataProvider.getSegmentZKMetadata(MINION_CONTEXT.getHelixPropertyStore(), tableNameWithType, segmentName);
+    SegmentZKMetadata segmentZKMetadata = ZKMetadataProvider.getSegmentZKMetadata(MINION_CONTEXT.getHelixPropertyStore(), tableNameWithType, segmentName);
     /*
      * If the segmentZKMetadata is null, it is likely that the segment has been deleted; return -1 as the CRC in this case,
      * so that the task can terminate early when verifying the CRC. If we throw an exception, Helix will keep retrying this forever
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java
index f6b3aec..3b04b27 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java
@@ -54,16 +54,13 @@ public class MergeTaskUtils {
    * the table does not have a time column.
    */
   @Nullable
-  public static TimeHandlerConfig getTimeHandlerConfig(TableConfig tableConfig, Schema schema,
-      Map<String, String> taskConfig) {
+  public static TimeHandlerConfig getTimeHandlerConfig(TableConfig tableConfig, Schema schema, Map<String, String> taskConfig) {
     String timeColumn = tableConfig.getValidationConfig().getTimeColumnName();
     if (timeColumn == null) {
       return null;
     }
     DateTimeFieldSpec fieldSpec = schema.getSpecForTimeColumn(timeColumn);
-    Preconditions
-        .checkState(fieldSpec != null, "No valid spec found for time column: %s in schema for table: %s", timeColumn,
-            tableConfig.getTableName());
+    Preconditions.checkState(fieldSpec != null, "No valid spec found for time column: %s in schema for table: %s", timeColumn, tableConfig.getTableName());
 
     TimeHandlerConfig.Builder timeHandlerConfigBuilder = new TimeHandlerConfig.Builder(TimeHandler.Type.EPOCH);
 
@@ -89,22 +86,20 @@ public class MergeTaskUtils {
   /**
    * Creates the partitioner configs based on the given table config, schema and task config.
    */
-  public static List<PartitionerConfig> getPartitionerConfigs(TableConfig tableConfig, Schema schema,
-      Map<String, String> taskConfig) {
+  public static List<PartitionerConfig> getPartitionerConfigs(TableConfig tableConfig, Schema schema, Map<String, String> taskConfig) {
     SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
     if (segmentPartitionConfig == null) {
       return Collections.emptyList();
     }
     Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
-    Preconditions.checkState(columnPartitionMap.size() == 1, "Cannot partition on multiple columns for table: %s",
-        tableConfig.getTableName());
+    Preconditions.checkState(columnPartitionMap.size() == 1, "Cannot partition on multiple columns for table: %s", tableConfig.getTableName());
     Map.Entry<String, ColumnPartitionConfig> entry = columnPartitionMap.entrySet().iterator().next();
     String partitionColumn = entry.getKey();
-    Preconditions.checkState(schema.hasColumn(partitionColumn),
-        "Partition column: %s does not exist in the schema for table: %s", partitionColumn, tableConfig.getTableName());
+    Preconditions.checkState(schema.hasColumn(partitionColumn), "Partition column: %s does not exist in the schema for table: %s", partitionColumn,
+        tableConfig.getTableName());
     PartitionerConfig partitionerConfig =
-        new PartitionerConfig.Builder().setPartitionerType(PartitionerFactory.PartitionerType.TABLE_PARTITION_CONFIG)
-            .setColumnName(partitionColumn).setColumnPartitionConfig(entry.getValue()).build();
+        new PartitionerConfig.Builder().setPartitionerType(PartitionerFactory.PartitionerType.TABLE_PARTITION_CONFIG).setColumnName(partitionColumn)
+            .setColumnPartitionConfig(entry.getValue()).build();
     return Collections.singletonList(partitionerConfig);
   }
 
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionResult.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionResult.java
index 75eba9e..56a0dde 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionResult.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionResult.java
@@ -34,8 +34,7 @@ public class SegmentConversionResult {
   private final String _segmentName;
   private final Map<String, Object> _customProperties;
 
-  private SegmentConversionResult(File file, String tableNameWithType, String segmentName,
-      Map<String, Object> customProperties) {
+  private SegmentConversionResult(File file, String tableNameWithType, String segmentName, Map<String, Object> customProperties) {
     _file = file;
     _tableNameWithType = tableNameWithType;
     _segmentName = segmentName;
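
SegmentConversionResult is only ever constructed through the fluent Builder used by the task executors elsewhere in this diff. A minimal sketch of that usage, with a hypothetical working directory and table/segment names standing in for real task outputs:

    import java.io.File;
    import org.apache.pinot.plugin.minion.tasks.SegmentConversionResult;

    public class ConversionResultSketch {
      static SegmentConversionResult example() {
        // Hypothetical values; the builder calls mirror the ones used by the task executors.
        File convertedSegmentDir = new File("/tmp/pinot-minion/workingDir/myTable_2021-08-18_0");
        return new SegmentConversionResult.Builder()
            .setFile(convertedSegmentDir)
            .setTableNameWithType("myTable_OFFLINE")
            .setSegmentName("myTable_2021-08-18_0")
            .build();
      }
    }
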
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionUtils.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionUtils.java
index bb74d48..1d81a9e 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionUtils.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/SegmentConversionUtils.java
@@ -54,21 +54,17 @@ public class SegmentConversionUtils {
   private SegmentConversionUtils() {
   }
 
-  public static void uploadSegment(Map<String, String> configs, List<Header> httpHeaders,
-      List<NameValuePair> parameters, String tableNameWithType, String segmentName, String uploadURL, File fileToUpload)
+  public static void uploadSegment(Map<String, String> configs, List<Header> httpHeaders, List<NameValuePair> parameters, String tableNameWithType,
+      String segmentName, String uploadURL, File fileToUpload)
       throws Exception {
     // Generate retry policy based on the config
     String maxNumAttemptsConfig = configs.get(MinionConstants.MAX_NUM_ATTEMPTS_KEY);
-    int maxNumAttempts =
-        maxNumAttemptsConfig != null ? Integer.parseInt(maxNumAttemptsConfig) : DEFAULT_MAX_NUM_ATTEMPTS;
+    int maxNumAttempts = maxNumAttemptsConfig != null ? Integer.parseInt(maxNumAttemptsConfig) : DEFAULT_MAX_NUM_ATTEMPTS;
     String initialRetryDelayMsConfig = configs.get(MinionConstants.INITIAL_RETRY_DELAY_MS_KEY);
-    long initialRetryDelayMs =
-        initialRetryDelayMsConfig != null ? Long.parseLong(initialRetryDelayMsConfig) : DEFAULT_INITIAL_RETRY_DELAY_MS;
+    long initialRetryDelayMs = initialRetryDelayMsConfig != null ? Long.parseLong(initialRetryDelayMsConfig) : DEFAULT_INITIAL_RETRY_DELAY_MS;
     String retryScaleFactorConfig = configs.get(MinionConstants.RETRY_SCALE_FACTOR_KEY);
-    double retryScaleFactor =
-        retryScaleFactorConfig != null ? Double.parseDouble(retryScaleFactorConfig) : DEFAULT_RETRY_SCALE_FACTOR;
-    RetryPolicy retryPolicy =
-        RetryPolicies.exponentialBackoffRetryPolicy(maxNumAttempts, initialRetryDelayMs, retryScaleFactor);
+    double retryScaleFactor = retryScaleFactorConfig != null ? Double.parseDouble(retryScaleFactorConfig) : DEFAULT_RETRY_SCALE_FACTOR;
+    RetryPolicy retryPolicy = RetryPolicies.exponentialBackoffRetryPolicy(maxNumAttempts, initialRetryDelayMs, retryScaleFactor);
 
     // Upload the segment with retry policy
     SSLContext sslContext = MinionContext.getInstance().getSSLContext();
@@ -76,10 +72,9 @@ public class SegmentConversionUtils {
       retryPolicy.attempt(() -> {
         try {
           SimpleHttpResponse response = fileUploadDownloadClient
-              .uploadSegment(new URI(uploadURL), segmentName, fileToUpload, httpHeaders, parameters,
-                  FileUploadDownloadClient.DEFAULT_SOCKET_TIMEOUT_MS);
-          LOGGER.info("Got response {}: {} while uploading table: {}, segment: {} with uploadURL: {}",
-              response.getStatusCode(), response.getResponse(), tableNameWithType, segmentName, uploadURL);
+              .uploadSegment(new URI(uploadURL), segmentName, fileToUpload, httpHeaders, parameters, FileUploadDownloadClient.DEFAULT_SOCKET_TIMEOUT_MS);
+          LOGGER.info("Got response {}: {} while uploading table: {}, segment: {} with uploadURL: {}", response.getStatusCode(), response.getResponse(),
+              tableNameWithType, segmentName, uploadURL);
           return true;
         } catch (HttpErrorStatusException e) {
           int statusCode = e.getStatusCode();
@@ -100,19 +95,17 @@ public class SegmentConversionUtils {
     }
   }
 
-  public static String startSegmentReplace(String tableNameWithType, String uploadURL,
-      StartReplaceSegmentsRequest startReplaceSegmentsRequest)
+  public static String startSegmentReplace(String tableNameWithType, String uploadURL, StartReplaceSegmentsRequest startReplaceSegmentsRequest)
       throws Exception {
     String rawTableName = TableNameBuilder.extractRawTableName(tableNameWithType);
     TableType tableType = TableNameBuilder.getTableTypeFromTableName(tableNameWithType);
     SSLContext sslContext = MinionContext.getInstance().getSSLContext();
     try (FileUploadDownloadClient fileUploadDownloadClient = new FileUploadDownloadClient(sslContext)) {
-      URI uri = FileUploadDownloadClient
-          .getStartReplaceSegmentsURI(new URI(uploadURL), rawTableName, tableType.name());
+      URI uri = FileUploadDownloadClient.getStartReplaceSegmentsURI(new URI(uploadURL), rawTableName, tableType.name());
       SimpleHttpResponse response = fileUploadDownloadClient.startReplaceSegments(uri, startReplaceSegmentsRequest);
       String responseString = response.getResponse();
-      LOGGER.info("Got response {}: {} while uploading table: {}, uploadURL: {}, request: {}", response.getStatusCode(),
-          responseString, tableNameWithType, uploadURL, startReplaceSegmentsRequest);
+      LOGGER.info("Got response {}: {} while uploading table: {}, uploadURL: {}, request: {}", response.getStatusCode(), responseString, tableNameWithType,
+          uploadURL, startReplaceSegmentsRequest);
       return JsonUtils.stringToJsonNode(responseString).get("segmentLineageEntryId").asText();
     }
   }
@@ -123,11 +116,10 @@ public class SegmentConversionUtils {
     TableType tableType = TableNameBuilder.getTableTypeFromTableName(tableNameWithType);
     SSLContext sslContext = MinionContext.getInstance().getSSLContext();
     try (FileUploadDownloadClient fileUploadDownloadClient = new FileUploadDownloadClient(sslContext)) {
-      URI uri = FileUploadDownloadClient
-          .getEndReplaceSegmentsURI(new URI(uploadURL), rawTableName, tableType.name(), segmentLineageEntryId);
+      URI uri = FileUploadDownloadClient.getEndReplaceSegmentsURI(new URI(uploadURL), rawTableName, tableType.name(), segmentLineageEntryId);
       SimpleHttpResponse response = fileUploadDownloadClient.endReplaceSegments(uri);
-      LOGGER.info("Got response {}: {} while uploading table: {}, uploadURL: {}", response.getStatusCode(),
-          response.getResponse(), tableNameWithType, uploadURL);
+      LOGGER
+          .info("Got response {}: {} while uploading table: {}, uploadURL: {}", response.getStatusCode(), response.getResponse(), tableNameWithType, uploadURL);
     }
   }
 }
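
The upload path above wraps each HTTP attempt in Pinot's RetryPolicy: the callback returns true to stop, and false (or a retriable failure) to back off and try again with exponentially growing delays. A rough sketch of the same pattern, assuming the retry utilities resolve under org.apache.pinot.spi.utils.retry as in this generation of the codebase, with the attempt body reduced to a placeholder:

    import org.apache.pinot.spi.utils.retry.RetryPolicies;
    import org.apache.pinot.spi.utils.retry.RetryPolicy;

    public class RetrySketch {
      public static void main(String[] args) throws Exception {
        // Same factory call as uploadSegment(); 5 attempts, 1s initial delay, 2x scale factor (arbitrary numbers).
        RetryPolicy retryPolicy = RetryPolicies.exponentialBackoffRetryPolicy(5, 1000L, 2.0);
        retryPolicy.attempt(() -> {
          // Placeholder for the real upload; return true on success, false to retry.
          return doUploadOnce();
        });
      }

      private static boolean doUploadOnce() {
        return true;
      }
    }
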
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskExecutor.java
similarity index 81%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskExecutor.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskExecutor.java
index 16d04a3..cb5d26c 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskExecutor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.convert_to_raw_index;
+package org.apache.pinot.plugin.minion.tasks.converttorawindex;
 
 import java.io.File;
 import java.util.Collections;
@@ -38,18 +38,15 @@ public class ConvertToRawIndexTaskExecutor extends BaseSingleSegmentConversionEx
     Map<String, String> configs = pinotTaskConfig.getConfigs();
     String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY);
     String rawTableName = TableNameBuilder.extractRawTableName(tableNameWithType);
-    new RawIndexConverter(rawTableName, indexDir, workingDir,
-        configs.get(MinionConstants.ConvertToRawIndexTask.COLUMNS_TO_CONVERT_KEY)).convert();
-    return new SegmentConversionResult.Builder().setFile(workingDir)
-        .setTableNameWithType(configs.get(MinionConstants.TABLE_NAME_KEY))
+    new RawIndexConverter(rawTableName, indexDir, workingDir, configs.get(MinionConstants.ConvertToRawIndexTask.COLUMNS_TO_CONVERT_KEY)).convert();
+    return new SegmentConversionResult.Builder().setFile(workingDir).setTableNameWithType(configs.get(MinionConstants.TABLE_NAME_KEY))
         .setSegmentName(configs.get(MinionConstants.SEGMENT_NAME_KEY)).build();
   }
 
   @Override
-  protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(
-      PinotTaskConfig pinotTaskConfig, SegmentConversionResult segmentConversionResult) {
+  protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(PinotTaskConfig pinotTaskConfig,
+      SegmentConversionResult segmentConversionResult) {
     return new SegmentZKMetadataCustomMapModifier(SegmentZKMetadataCustomMapModifier.ModifyMode.UPDATE, Collections
-        .singletonMap(MinionConstants.ConvertToRawIndexTask.TASK_TYPE + MinionConstants.TASK_TIME_SUFFIX,
-            String.valueOf(System.currentTimeMillis())));
+        .singletonMap(MinionConstants.ConvertToRawIndexTask.TASK_TYPE + MinionConstants.TASK_TIME_SUFFIX, String.valueOf(System.currentTimeMillis())));
   }
 }
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskExecutorFactory.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskExecutorFactory.java
similarity index 95%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskExecutorFactory.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskExecutorFactory.java
index 6311fda..59e805a 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskExecutorFactory.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskExecutorFactory.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.convert_to_raw_index;
+package org.apache.pinot.plugin.minion.tasks.converttorawindex;
 
 import org.apache.pinot.core.common.MinionConstants;
 import org.apache.pinot.minion.executor.MinionTaskZkMetadataManager;
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskGenerator.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskGenerator.java
similarity index 90%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskGenerator.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskGenerator.java
index 84bf149..a7d31f5 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/convert_to_raw_index/ConvertToRawIndexTaskGenerator.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/converttorawindex/ConvertToRawIndexTaskGenerator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.convert_to_raw_index;
+package org.apache.pinot.plugin.minion.tasks.converttorawindex;
 
 import com.google.common.base.Preconditions;
 import java.util.ArrayList;
@@ -60,8 +60,7 @@ public class ConvertToRawIndexTaskGenerator implements PinotTaskGenerator {
     List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
 
     // Get the segments that are being converted so that we don't submit them again
-    Set<Segment> runningSegments =
-        TaskGeneratorUtils.getRunningSegments(MinionConstants.ConvertToRawIndexTask.TASK_TYPE, _clusterInfoAccessor);
+    Set<Segment> runningSegments = TaskGeneratorUtils.getRunningSegments(MinionConstants.ConvertToRawIndexTask.TASK_TYPE, _clusterInfoAccessor);
 
     for (TableConfig tableConfig : tableConfigs) {
       // Only generate tasks for OFFLINE tables
@@ -73,8 +72,7 @@ public class ConvertToRawIndexTaskGenerator implements PinotTaskGenerator {
 
       TableTaskConfig tableTaskConfig = tableConfig.getTaskConfig();
       Preconditions.checkNotNull(tableTaskConfig);
-      Map<String, String> taskConfigs =
-          tableTaskConfig.getConfigsForTaskType(MinionConstants.ConvertToRawIndexTask.TASK_TYPE);
+      Map<String, String> taskConfigs = tableTaskConfig.getConfigsForTaskType(MinionConstants.ConvertToRawIndexTask.TASK_TYPE);
       Preconditions.checkNotNull(taskConfigs, "Task config shouldn't be null for Table: {}", offlineTableName);
 
       // Get max number of tasks for this table
@@ -109,8 +107,7 @@ public class ConvertToRawIndexTaskGenerator implements PinotTaskGenerator {
 
         // Only submit segments that have not been converted
         Map<String, String> customMap = segmentZKMetadata.getCustomMap();
-        if (customMap == null || !customMap.containsKey(
-            MinionConstants.ConvertToRawIndexTask.COLUMNS_TO_CONVERT_KEY + MinionConstants.TASK_TIME_SUFFIX)) {
+        if (customMap == null || !customMap.containsKey(MinionConstants.ConvertToRawIndexTask.COLUMNS_TO_CONVERT_KEY + MinionConstants.TASK_TIME_SUFFIX)) {
           Map<String, String> configs = new HashMap<>();
           configs.put(MinionConstants.TABLE_NAME_KEY, offlineTableName);
           configs.put(MinionConstants.SEGMENT_NAME_KEY, segmentName);
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutor.java
similarity index 86%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutor.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutor.java
index 86d6af4..7af29dd 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.merge_rollup;
+package org.apache.pinot.plugin.minion.tasks.mergerollup;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -47,8 +47,7 @@ public class MergeRollupTaskExecutor extends BaseMultipleSegmentsConversionExecu
   private static final Logger LOGGER = LoggerFactory.getLogger(MergeRollupTaskExecutor.class);
 
   @Override
-  protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs,
-      File workingDir)
+  protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs, File workingDir)
       throws Exception {
     String taskType = pinotTaskConfig.getTaskType();
     Map<String, String> configs = pinotTaskConfig.getConfigs();
@@ -59,16 +58,13 @@ public class MergeRollupTaskExecutor extends BaseMultipleSegmentsConversionExecu
     TableConfig tableConfig = getTableConfig(tableNameWithType);
     Schema schema = getSchema(tableNameWithType);
 
-    SegmentProcessorConfig.Builder segmentProcessorConfigBuilder =
-        new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);
+    SegmentProcessorConfig.Builder segmentProcessorConfigBuilder = new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);
 
     // Time handler config
-    segmentProcessorConfigBuilder
-        .setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs));
+    segmentProcessorConfigBuilder.setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs));
 
     // Partitioner config
-    segmentProcessorConfigBuilder
-        .setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs));
+    segmentProcessorConfigBuilder.setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs));
 
     // Merge type
     segmentProcessorConfigBuilder.setMergeType(MergeTaskUtils.getMergeType(configs));
@@ -102,8 +98,8 @@ public class MergeRollupTaskExecutor extends BaseMultipleSegmentsConversionExecu
     List<SegmentConversionResult> results = new ArrayList<>();
     for (File outputSegmentDir : outputSegmentDirs) {
       String outputSegmentName = outputSegmentDir.getName();
-      results.add(new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName)
-          .setTableNameWithType(tableNameWithType).build());
+      results.add(
+          new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName).setTableNameWithType(tableNameWithType).build());
     }
     return results;
   }
@@ -112,8 +108,7 @@ public class MergeRollupTaskExecutor extends BaseMultipleSegmentsConversionExecu
   protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(PinotTaskConfig pinotTaskConfig,
       SegmentConversionResult segmentConversionResult) {
     Map<String, String> updateMap = new TreeMap<>();
-    updateMap.put(MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY,
-        pinotTaskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
+    updateMap.put(MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, pinotTaskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
     updateMap.put(MergeRollupTask.SEGMENT_ZK_METADATA_TIME_KEY, String.valueOf(System.currentTimeMillis()));
     return new SegmentZKMetadataCustomMapModifier(SegmentZKMetadataCustomMapModifier.ModifyMode.UPDATE, updateMap);
   }
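
convert() above funnels the table config, schema, time handler, partitioners and merge type into one SegmentProcessorConfig through chained builder setters. A condensed sketch of that wiring, assuming SegmentProcessorConfig lives under org.apache.pinot.core.segment.processing.framework (as it does in this generation of the codebase) and leaving out the rollup aggregation and segment-size settings the executor also applies:

    import java.util.Map;
    import org.apache.pinot.core.segment.processing.framework.SegmentProcessorConfig;
    import org.apache.pinot.plugin.minion.tasks.MergeTaskUtils;
    import org.apache.pinot.spi.config.table.TableConfig;
    import org.apache.pinot.spi.data.Schema;

    public class ProcessorConfigSketch {
      // Callers supply the inputs; this only shows the builder chain used by the executor.
      static SegmentProcessorConfig build(TableConfig tableConfig, Schema schema, Map<String, String> configs) {
        return new SegmentProcessorConfig.Builder()
            .setTableConfig(tableConfig)
            .setSchema(schema)
            .setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs))
            .setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs))
            .setMergeType(MergeTaskUtils.getMergeType(configs))
            .build();
      }
    }
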
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutorFactory.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutorFactory.java
similarity index 96%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutorFactory.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutorFactory.java
index 511f5e7..6183f7b 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutorFactory.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutorFactory.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.merge_rollup;
+package org.apache.pinot.plugin.minion.tasks.mergerollup;
 
 import org.apache.pinot.core.common.MinionConstants;
 import org.apache.pinot.minion.executor.MinionTaskZkMetadataManager;
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskGenerator.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskGenerator.java
similarity index 79%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskGenerator.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskGenerator.java
index 21f1918..0b4e054 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskGenerator.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskGenerator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.merge_rollup;
+package org.apache.pinot.plugin.minion.tasks.mergerollup;
 
 import com.google.common.base.Preconditions;
 import java.util.ArrayList;
@@ -135,8 +135,7 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
       }
 
       if (preSelectedSegments.isEmpty()) {
-        LOGGER
-            .info("Skip generating task: {} for table: {}, no segment is found to merge.", taskType, offlineTableName);
+        LOGGER.info("Skip generating task: {} for table: {}, no segment is found to merge.", taskType, offlineTableName);
         continue;
       }
 
@@ -149,22 +148,19 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
         }
         long aEndTime = a.getEndTimeMs();
         long bEndTime = b.getEndTimeMs();
-        return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime)
-            : a.getSegmentName().compareTo(b.getSegmentName());
+        return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime) : a.getSegmentName().compareTo(b.getSegmentName());
       });
 
       // Sort merge levels based on bucket time period
       Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
       Map<String, Map<String, String>> mergeLevelToConfigs = MergeRollupTaskUtils.getLevelToConfigMap(taskConfigs);
-      List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs =
-          new ArrayList<>(mergeLevelToConfigs.entrySet());
-      sortedMergeLevelConfigs.sort(Comparator.comparingLong(
-          e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));
+      List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs = new ArrayList<>(mergeLevelToConfigs.entrySet());
+      sortedMergeLevelConfigs
+          .sort(Comparator.comparingLong(e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));
 
       // Get incomplete merge levels
       Set<String> inCompleteMergeLevels = new HashSet<>();
-      for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils
-          .getIncompleteTasks(taskType, offlineTableName, _clusterInfoAccessor).entrySet()) {
+      for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils.getIncompleteTasks(taskType, offlineTableName, _clusterInfoAccessor).entrySet()) {
         for (PinotTaskConfig taskConfig : _clusterInfoAccessor.getTaskConfigs(entry.getKey())) {
           inCompleteMergeLevels.add(taskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
         }
@@ -172,9 +168,8 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
 
       ZNRecord mergeRollupTaskZNRecord = _clusterInfoAccessor.getMinionMergeRollupTaskZNRecord(offlineTableName);
       int expectedVersion = mergeRollupTaskZNRecord != null ? mergeRollupTaskZNRecord.getVersion() : -1;
-      MergeRollupTaskMetadata mergeRollupTaskMetadata =
-          mergeRollupTaskZNRecord != null ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
-              : new MergeRollupTaskMetadata(offlineTableName, new TreeMap<>());
+      MergeRollupTaskMetadata mergeRollupTaskMetadata = mergeRollupTaskZNRecord != null ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
+          : new MergeRollupTaskMetadata(offlineTableName, new TreeMap<>());
       List<PinotTaskConfig> pinotTaskConfigsForTable = new ArrayList<>();
 
       // Schedule tasks from lowest to highest merge level (e.g. Hourly -> Daily -> Monthly -> Yearly)
@@ -186,28 +181,24 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
 
         // Skip scheduling if there's incomplete task for current mergeLevel
         if (inCompleteMergeLevels.contains(mergeLevel)) {
-          LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, Skipping task generation: {}",
-              mergeLevel, offlineTableName, taskType);
+          LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, Skipping task generation: {}", mergeLevel, offlineTableName, taskType);
           continue;
         }
 
         // Get the bucket size and buffer
-        long bucketMs =
-            TimeUtils.convertPeriodToMillis(mergeConfigs.get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY));
-        long bufferMs =
-            TimeUtils.convertPeriodToMillis(mergeConfigs.get(MinionConstants.MergeTask.BUFFER_TIME_PERIOD_KEY));
+        long bucketMs = TimeUtils.convertPeriodToMillis(mergeConfigs.get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY));
+        long bufferMs = TimeUtils.convertPeriodToMillis(mergeConfigs.get(MinionConstants.MergeTask.BUFFER_TIME_PERIOD_KEY));
 
         // Get watermark from MergeRollupTaskMetadata ZNode
         // windowStartMs = watermarkMs, windowEndMs = windowStartMs + bucketTimeMs
-        long waterMarkMs =
-            getWatermarkMs(preSelectedSegments.get(0).getStartTimeMs(), bucketMs, mergeLevel, mergeRollupTaskMetadata);
+        long waterMarkMs = getWatermarkMs(preSelectedSegments.get(0).getStartTimeMs(), bucketMs, mergeLevel, mergeRollupTaskMetadata);
         long windowStartMs = waterMarkMs;
         long windowEndMs = windowStartMs + bucketMs;
 
         if (!isValidMergeWindowEndTime(windowEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata)) {
-          LOGGER.info(
-              "Window with start: {} and end: {} of mergeLevel: {} is not a valid merge window, Skipping task generation: {}",
-              windowStartMs, windowEndMs, mergeLevel, taskType);
+          LOGGER
+              .info("Window with start: {} and end: {} of mergeLevel: {} is not a valid merge window, Skipping task generation: {}", windowStartMs, windowEndMs,
+                  mergeLevel, taskType);
           continue;
         }
 
@@ -259,36 +250,31 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
         }
 
         if (!isValidMergeWindow) {
-          LOGGER.info(
-              "Window with start: {} and end: {} of mergeLevel: {} is not a valid merge window, Skipping task generation: {}",
-              windowStartMs, windowEndMs, mergeLevel, taskType);
+          LOGGER
+              .info("Window with start: {} and end: {} of mergeLevel: {} is not a valid merge window, Skipping task generation: {}", windowStartMs, windowEndMs,
+                  mergeLevel, taskType);
           continue;
         }
 
         if (!hasUnmergedSegments) {
-          LOGGER.info("No unmerged segments found for mergeLevel:{} for table: {}, Skipping task generation: {}",
-              mergeLevel, offlineTableName, taskType);
+          LOGGER.info("No unmerged segments found for mergeLevel:{} for table: {}, Skipping task generation: {}", mergeLevel, offlineTableName, taskType);
           continue;
         }
 
         Long prevWatermarkMs = mergeRollupTaskMetadata.getWatermarkMap().put(mergeLevel, windowStartMs);
-        LOGGER.info("Update watermark of mergeLevel: {} for table: {} from: {} to: {}", mergeLevel, offlineTableName,
-            prevWatermarkMs, waterMarkMs);
+        LOGGER.info("Update watermark of mergeLevel: {} for table: {} from: {} to: {}", mergeLevel, offlineTableName, prevWatermarkMs, waterMarkMs);
 
         // Create task configs
         int maxNumRecordsPerTask = mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY) != null ? Integer
-            .parseInt(mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY))
-            : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
+            .parseInt(mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY)) : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
         SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
         if (segmentPartitionConfig == null) {
-          pinotTaskConfigsForTable.addAll(
-              createPinotTaskConfigs(selectedSegments, offlineTableName, maxNumRecordsPerTask, mergeLevel, mergeConfigs,
-                  taskConfigs));
+          pinotTaskConfigsForTable
+              .addAll(createPinotTaskConfigs(selectedSegments, offlineTableName, maxNumRecordsPerTask, mergeLevel, mergeConfigs, taskConfigs));
         } else {
           // For partitioned table, schedule separate tasks for each partition
           Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
-          Preconditions.checkState(columnPartitionMap.size() == 1, "Cannot partition on multiple columns for table: %s",
-              tableConfig.getTableName());
+          Preconditions.checkState(columnPartitionMap.size() == 1, "Cannot partition on multiple columns for table: %s", tableConfig.getTableName());
           Map.Entry<String, ColumnPartitionConfig> partitionEntry = columnPartitionMap.entrySet().iterator().next();
           String partitionColumn = partitionEntry.getKey();
 
@@ -297,8 +283,7 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
           List<SegmentZKMetadata> outlierSegments = new ArrayList<>();
           for (SegmentZKMetadata selectedSegment : selectedSegments) {
             SegmentPartitionMetadata segmentPartitionMetadata = selectedSegment.getPartitionMetadata();
-            if (segmentPartitionMetadata == null
-                || segmentPartitionMetadata.getPartitions(partitionColumn).size() != 1) {
+            if (segmentPartitionMetadata == null || segmentPartitionMetadata.getPartitions(partitionColumn).size() != 1) {
               outlierSegments.add(selectedSegment);
             } else {
               int partition = segmentPartitionMetadata.getPartitions(partitionColumn).iterator().next();
@@ -308,14 +293,12 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
 
           for (Map.Entry<Integer, List<SegmentZKMetadata>> partitionToSegmentsEntry : partitionToSegments.entrySet()) {
             pinotTaskConfigsForTable.addAll(
-                createPinotTaskConfigs(partitionToSegmentsEntry.getValue(), offlineTableName, maxNumRecordsPerTask,
-                    mergeLevel, mergeConfigs, taskConfigs));
+                createPinotTaskConfigs(partitionToSegmentsEntry.getValue(), offlineTableName, maxNumRecordsPerTask, mergeLevel, mergeConfigs, taskConfigs));
           }
 
           if (!outlierSegments.isEmpty()) {
-            pinotTaskConfigsForTable.addAll(
-                createPinotTaskConfigs(outlierSegments, offlineTableName, maxNumRecordsPerTask, mergeLevel,
-                    mergeConfigs, taskConfigs));
+            pinotTaskConfigsForTable
+                .addAll(createPinotTaskConfigs(outlierSegments, offlineTableName, maxNumRecordsPerTask, mergeLevel, mergeConfigs, taskConfigs));
           }
         }
       }
@@ -324,15 +307,12 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
       try {
         _clusterInfoAccessor.setMergeRollupTaskMetadata(mergeRollupTaskMetadata, expectedVersion);
       } catch (ZkException e) {
-        LOGGER.error(
-            "Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are multiple task schedulers for the same table, need to investigate!",
-            offlineTableName);
+        LOGGER.error("Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are "
+            + "multiple task schedulers for the same table, need to investigate!", offlineTableName);
         continue;
       }
       pinotTaskConfigs.addAll(pinotTaskConfigsForTable);
-      LOGGER
-          .info("Finished generating task configs for table: {} for task: {}, numTasks: {}", offlineTableName, taskType,
-              pinotTaskConfigsForTable.size());
+      LOGGER.info("Finished generating task configs for table: {} for task: {}, numTasks: {}", offlineTableName, taskType, pinotTaskConfigsForTable.size());
     }
     return pinotTaskConfigs;
   }
@@ -343,14 +323,12 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
   private boolean validate(TableConfig tableConfig, String taskType) {
     String offlineTableName = tableConfig.getTableName();
     if (tableConfig.getTableType() != TableType.OFFLINE) {
-      LOGGER.warn("Skip generating task: {} for non-OFFLINE table: {}, REALTIME table is not supported yet", taskType,
-          offlineTableName);
+      LOGGER.warn("Skip generating task: {} for non-OFFLINE table: {}, REALTIME table is not supported yet", taskType, offlineTableName);
       return false;
     }
 
     if (REFRESH.equalsIgnoreCase(IngestionConfigUtils.getBatchSegmentIngestionType(tableConfig))) {
-      LOGGER.warn("Skip generating task: {} for non-APPEND table: {}, REFRESH table is not supported", taskType,
-          offlineTableName);
+      LOGGER.warn("Skip generating task: {} for non-APPEND table: {}, REFRESH table is not supported", taskType, offlineTableName);
       return false;
     }
     return true;
@@ -361,30 +339,27 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
    */
   private boolean isMergedSegment(SegmentZKMetadata segmentZKMetadata, String mergeLevel) {
     Map<String, String> customMap = segmentZKMetadata.getCustomMap();
-    return customMap != null && mergeLevel
-        .equalsIgnoreCase(customMap.get(MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY));
+    return customMap != null && mergeLevel.equalsIgnoreCase(customMap.get(MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY));
   }
 
   /**
    * Check if the merge window end time is valid
    */
-  private boolean isValidMergeWindowEndTime(long windowEndMs, long bufferMs, String lowerMergeLevel,
-      MergeRollupTaskMetadata mergeRollupTaskMetadata) {
+  private boolean isValidMergeWindowEndTime(long windowEndMs, long bufferMs, String lowerMergeLevel, MergeRollupTaskMetadata mergeRollupTaskMetadata) {
     // Check that execution window endTimeMs <= now - bufferTime
     if (windowEndMs > System.currentTimeMillis() - bufferMs) {
       return false;
     }
     // Check that execution window endTimeMs <= waterMark of the lower mergeLevel
-    return lowerMergeLevel == null || mergeRollupTaskMetadata.getWatermarkMap().get(lowerMergeLevel) == null
-        || windowEndMs <= mergeRollupTaskMetadata.getWatermarkMap().get(lowerMergeLevel);
+    return lowerMergeLevel == null || mergeRollupTaskMetadata.getWatermarkMap().get(lowerMergeLevel) == null || windowEndMs <= mergeRollupTaskMetadata
+        .getWatermarkMap().get(lowerMergeLevel);
   }
 
   /**
    * Get the watermark from the MergeRollupMetadata ZNode.
    * If the znode is null, computes the watermark using the start time from segment metadata
    */
-  private long getWatermarkMs(long minStartTimeMs, long bucketMs, String mergeLevel,
-      MergeRollupTaskMetadata mergeRollupTaskMetadata) {
+  private long getWatermarkMs(long minStartTimeMs, long bucketMs, String mergeLevel, MergeRollupTaskMetadata mergeRollupTaskMetadata) {
     long watermarkMs;
     if (mergeRollupTaskMetadata.getWatermarkMap().get(mergeLevel) == null) {
       // No ZNode exists. Cold-start.
@@ -400,9 +375,8 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
   /**
    * Create pinot task configs with selected segments and configs
    */
-  private List<PinotTaskConfig> createPinotTaskConfigs(List<SegmentZKMetadata> selectedSegments,
-      String offlineTableName, int maxNumRecordsPerTask, String mergeLevel, Map<String, String> mergeConfigs,
-      Map<String, String> taskConfigs) {
+  private List<PinotTaskConfig> createPinotTaskConfigs(List<SegmentZKMetadata> selectedSegments, String offlineTableName, int maxNumRecordsPerTask,
+      String mergeLevel, Map<String, String> mergeConfigs, Map<String, String> taskConfigs) {
     int numRecordsPerTask = 0;
     List<List<String>> segmentNamesList = new ArrayList<>();
     List<List<String>> downloadURLsList = new ArrayList<>();
@@ -427,10 +401,8 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
     for (int i = 0; i < segmentNamesList.size(); i++) {
       Map<String, String> configs = new HashMap<>();
       configs.put(MinionConstants.TABLE_NAME_KEY, offlineTableName);
-      configs.put(MinionConstants.SEGMENT_NAME_KEY,
-          StringUtils.join(segmentNamesList.get(i), MinionConstants.SEGMENT_NAME_SEPARATOR));
-      configs.put(MinionConstants.DOWNLOAD_URL_KEY,
-          StringUtils.join(downloadURLsList.get(i), MinionConstants.URL_SEPARATOR));
+      configs.put(MinionConstants.SEGMENT_NAME_KEY, StringUtils.join(segmentNamesList.get(i), MinionConstants.SEGMENT_NAME_SEPARATOR));
+      configs.put(MinionConstants.DOWNLOAD_URL_KEY, StringUtils.join(downloadURLsList.get(i), MinionConstants.URL_SEPARATOR));
       configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments");
       configs.put(MinionConstants.ENABLE_REPLACE_SEGMENTS_KEY, "true");
 
@@ -442,16 +414,13 @@ public class MergeRollupTaskGenerator implements PinotTaskGenerator {
 
       configs.put(MergeRollupTask.MERGE_TYPE_KEY, mergeConfigs.get(MinionConstants.MergeTask.MERGE_TYPE_KEY));
       configs.put(MergeRollupTask.MERGE_LEVEL_KEY, mergeLevel);
-      configs.put(MinionConstants.MergeTask.PARTITION_BUCKET_TIME_PERIOD_KEY,
-          mergeConfigs.get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY));
-      configs.put(MinionConstants.MergeTask.ROUND_BUCKET_TIME_PERIOD_KEY,
-          mergeConfigs.get(MinionConstants.MergeTask.ROUND_BUCKET_TIME_PERIOD_KEY));
-      configs.put(MinionConstants.MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY,
-          mergeConfigs.get(MinionConstants.MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY));
+      configs.put(MinionConstants.MergeTask.PARTITION_BUCKET_TIME_PERIOD_KEY, mergeConfigs.get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY));
+      configs.put(MinionConstants.MergeTask.ROUND_BUCKET_TIME_PERIOD_KEY, mergeConfigs.get(MinionConstants.MergeTask.ROUND_BUCKET_TIME_PERIOD_KEY));
+      configs.put(MinionConstants.MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY, mergeConfigs.get(MinionConstants.MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY));
 
       configs.put(MergeRollupTask.SEGMENT_NAME_PREFIX_KEY,
-          MergeRollupTask.MERGED_SEGMENT_NAME_PREFIX + mergeLevel + "_" + System.currentTimeMillis() + "_" + i + "_"
-              + TableNameBuilder.extractRawTableName(offlineTableName));
+          MergeRollupTask.MERGED_SEGMENT_NAME_PREFIX + mergeLevel + "_" + System.currentTimeMillis() + "_" + i + "_" + TableNameBuilder
+              .extractRawTableName(offlineTableName));
       pinotTaskConfigs.add(new PinotTaskConfig(MergeRollupTask.TASK_TYPE, configs));
     }
 
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskUtils.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskUtils.java
similarity index 95%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskUtils.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskUtils.java
index 79733a2..357cd73 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskUtils.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskUtils.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.merge_rollup;
+package org.apache.pinot.plugin.minion.tasks.mergerollup;
 
 import java.util.Map;
 import java.util.TreeMap;
@@ -24,6 +24,9 @@ import org.apache.pinot.core.common.MinionConstants.MergeTask;
 
 
 public class MergeRollupTaskUtils {
+  private MergeRollupTaskUtils() {
+  }
+
   //@formatter:off
   private static final String[] VALID_CONFIG_KEYS = {
       MergeTask.BUCKET_TIME_PERIOD_KEY,
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutor.java
index 8ba472b..0675d3f 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutor.java
@@ -46,11 +46,9 @@ public class PurgeTaskExecutor extends BaseSingleSegmentConversionExecutor {
 
     TableConfig tableConfig = getTableConfig(tableNameWithType);
     SegmentPurger.RecordPurgerFactory recordPurgerFactory = MINION_CONTEXT.getRecordPurgerFactory();
-    SegmentPurger.RecordPurger recordPurger =
-        recordPurgerFactory != null ? recordPurgerFactory.getRecordPurger(rawTableName) : null;
+    SegmentPurger.RecordPurger recordPurger = recordPurgerFactory != null ? recordPurgerFactory.getRecordPurger(rawTableName) : null;
     SegmentPurger.RecordModifierFactory recordModifierFactory = MINION_CONTEXT.getRecordModifierFactory();
-    SegmentPurger.RecordModifier recordModifier =
-        recordModifierFactory != null ? recordModifierFactory.getRecordModifier(rawTableName) : null;
+    SegmentPurger.RecordModifier recordModifier = recordModifierFactory != null ? recordModifierFactory.getRecordModifier(rawTableName) : null;
 
     SegmentPurger segmentPurger = new SegmentPurger(indexDir, workingDir, tableConfig, recordPurger, recordModifier);
     File purgedSegmentFile = segmentPurger.purgeSegment();
@@ -59,18 +57,16 @@ public class PurgeTaskExecutor extends BaseSingleSegmentConversionExecutor {
     }
 
     return new SegmentConversionResult.Builder().setFile(purgedSegmentFile).setTableNameWithType(tableNameWithType)
-        .setSegmentName(configs.get(MinionConstants.SEGMENT_NAME_KEY))
-        .setCustomProperty(RECORD_PURGER_KEY, segmentPurger.getRecordPurger())
+        .setSegmentName(configs.get(MinionConstants.SEGMENT_NAME_KEY)).setCustomProperty(RECORD_PURGER_KEY, segmentPurger.getRecordPurger())
         .setCustomProperty(RECORD_MODIFIER_KEY, segmentPurger.getRecordModifier())
         .setCustomProperty(NUM_RECORDS_PURGED_KEY, segmentPurger.getNumRecordsPurged())
         .setCustomProperty(NUM_RECORDS_MODIFIED_KEY, segmentPurger.getNumRecordsModified()).build();
   }
 
   @Override
-  protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(
-      PinotTaskConfig pinotTaskConfig, SegmentConversionResult segmentConversionResult) {
-    return new SegmentZKMetadataCustomMapModifier(SegmentZKMetadataCustomMapModifier.ModifyMode.UPDATE, Collections
-        .singletonMap(MinionConstants.PurgeTask.TASK_TYPE + MinionConstants.TASK_TIME_SUFFIX,
-            String.valueOf(System.currentTimeMillis())));
+  protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(PinotTaskConfig pinotTaskConfig,
+      SegmentConversionResult segmentConversionResult) {
+    return new SegmentZKMetadataCustomMapModifier(SegmentZKMetadataCustomMapModifier.ModifyMode.UPDATE,
+        Collections.singletonMap(MinionConstants.PurgeTask.TASK_TYPE + MinionConstants.TASK_TIME_SUFFIX, String.valueOf(System.currentTimeMillis())));
   }
 }
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutor.java
similarity index 88%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutor.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutor.java
index aefd917..18c8146 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.realtime_to_offline_segments;
+package org.apache.pinot.plugin.minion.tasks.realtimetoofflinesegments;
 
 import com.google.common.base.Preconditions;
 import java.io.File;
@@ -86,26 +86,22 @@ public class RealtimeToOfflineSegmentsTaskExecutor extends BaseMultipleSegmentsC
     Map<String, String> configs = pinotTaskConfig.getConfigs();
     String realtimeTableName = configs.get(MinionConstants.TABLE_NAME_KEY);
 
-    ZNRecord realtimeToOfflineSegmentsTaskZNRecord =
-        _minionTaskZkMetadataManager.getRealtimeToOfflineSegmentsTaskZNRecord(realtimeTableName);
+    ZNRecord realtimeToOfflineSegmentsTaskZNRecord = _minionTaskZkMetadataManager.getRealtimeToOfflineSegmentsTaskZNRecord(realtimeTableName);
     Preconditions.checkState(realtimeToOfflineSegmentsTaskZNRecord != null,
-        "RealtimeToOfflineSegmentsTaskMetadata ZNRecord for table: %s should not be null. Exiting task.",
-        realtimeTableName);
+        "RealtimeToOfflineSegmentsTaskMetadata ZNRecord for table: %s should not be null. Exiting task.", realtimeTableName);
 
     RealtimeToOfflineSegmentsTaskMetadata realtimeToOfflineSegmentsTaskMetadata =
         RealtimeToOfflineSegmentsTaskMetadata.fromZNRecord(realtimeToOfflineSegmentsTaskZNRecord);
     long windowStartMs = Long.parseLong(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY));
     Preconditions.checkState(realtimeToOfflineSegmentsTaskMetadata.getWatermarkMs() == windowStartMs,
         "watermarkMs in RealtimeToOfflineSegmentsTask metadata: %s does not match windowStartMs: %d in task configs for table: %s. "
-            + "ZNode may have been modified by another task", realtimeToOfflineSegmentsTaskMetadata, windowStartMs,
-        realtimeTableName);
+            + "ZNode may have been modified by another task", realtimeToOfflineSegmentsTaskMetadata, windowStartMs, realtimeTableName);
 
     _expectedVersion = realtimeToOfflineSegmentsTaskZNRecord.getVersion();
   }
 
   @Override
-  protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs,
-      File workingDir)
+  protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs, File workingDir)
       throws Exception {
     String taskType = pinotTaskConfig.getTaskType();
     Map<String, String> configs = pinotTaskConfig.getConfigs();
@@ -118,16 +114,13 @@ public class RealtimeToOfflineSegmentsTaskExecutor extends BaseMultipleSegmentsC
     TableConfig tableConfig = getTableConfig(offlineTableName);
     Schema schema = getSchema(offlineTableName);
 
-    SegmentProcessorConfig.Builder segmentProcessorConfigBuilder =
-        new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);
+    SegmentProcessorConfig.Builder segmentProcessorConfigBuilder = new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);
 
     // Time handler config
-    segmentProcessorConfigBuilder
-        .setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs));
+    segmentProcessorConfigBuilder.setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs));
 
     // Partitioner config
-    segmentProcessorConfigBuilder
-        .setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs));
+    segmentProcessorConfigBuilder.setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs));
 
     // Merge type
     MergeType mergeType = MergeTaskUtils.getMergeType(configs);
@@ -169,8 +162,8 @@ public class RealtimeToOfflineSegmentsTaskExecutor extends BaseMultipleSegmentsC
     List<SegmentConversionResult> results = new ArrayList<>();
     for (File outputSegmentDir : outputSegmentDirs) {
       String outputSegmentName = outputSegmentDir.getName();
-      results.add(new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName)
-          .setTableNameWithType(offlineTableName).build());
+      results.add(
+          new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName).setTableNameWithType(offlineTableName).build());
     }
     return results;
   }
@@ -185,15 +178,13 @@ public class RealtimeToOfflineSegmentsTaskExecutor extends BaseMultipleSegmentsC
     Map<String, String> configs = pinotTaskConfig.getConfigs();
     String realtimeTableName = configs.get(MinionConstants.TABLE_NAME_KEY);
     long waterMarkMs = Long.parseLong(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY));
-    RealtimeToOfflineSegmentsTaskMetadata newMinionMetadata =
-        new RealtimeToOfflineSegmentsTaskMetadata(realtimeTableName, waterMarkMs);
+    RealtimeToOfflineSegmentsTaskMetadata newMinionMetadata = new RealtimeToOfflineSegmentsTaskMetadata(realtimeTableName, waterMarkMs);
     _minionTaskZkMetadataManager.setRealtimeToOfflineSegmentsTaskMetadata(newMinionMetadata, _expectedVersion);
   }
 
   @Override
   protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(PinotTaskConfig pinotTaskConfig,
       SegmentConversionResult segmentConversionResult) {
-    return new SegmentZKMetadataCustomMapModifier(SegmentZKMetadataCustomMapModifier.ModifyMode.UPDATE,
-        Collections.emptyMap());
+    return new SegmentZKMetadataCustomMapModifier(SegmentZKMetadataCustomMapModifier.ModifyMode.UPDATE, Collections.emptyMap());
   }
 }
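
Beyond the re-wrapping, the executor above relies on an optimistic-concurrency pattern: it remembers the ZNRecord version it read (_expectedVersion) and passes that expected version when it writes the new watermark back, so a ZNode modified by another task fails the write instead of being silently overwritten. A minimal plain-JDK sketch of the same pattern with illustrative names; in the real code the version check is performed by the ZK property store, not by the executor itself.

    import java.util.concurrent.atomic.AtomicReference;

    // Versioned read-modify-write: the write succeeds only if the stored version is
    // still the one observed at read time.
    public final class VersionedWatermarkStore {
      private VersionedWatermarkStore() {
      }

      record Versioned(long watermarkMs, int version) {
      }

      private static final AtomicReference<Versioned> STORE = new AtomicReference<>(new Versioned(0L, 0));

      static Versioned read() {
        return STORE.get();
      }

      static boolean writeIfUnchanged(long newWatermarkMs, int expectedVersion) {
        Versioned current = STORE.get();
        if (current.version() != expectedVersion) {
          return false;  // someone else advanced the watermark in the meantime
        }
        return STORE.compareAndSet(current, new Versioned(newWatermarkMs, expectedVersion + 1));
      }

      public static void main(String[] args) {
        Versioned snapshot = read();  // remember the expected version before doing the work
        long newWatermarkMs = snapshot.watermarkMs() + 3_600_000L;
        System.out.println("watermark advanced: " + writeIfUnchanged(newWatermarkMs, snapshot.version()));
      }
    }
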
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutorFactory.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutorFactory.java
similarity index 95%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutorFactory.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutorFactory.java
index a15e1a4..ee70a45 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutorFactory.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutorFactory.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.realtime_to_offline_segments;
+package org.apache.pinot.plugin.minion.tasks.realtimetoofflinesegments;
 
 import org.apache.pinot.core.common.MinionConstants;
 import org.apache.pinot.minion.executor.MinionTaskZkMetadataManager;
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskGenerator.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskGenerator.java
similarity index 84%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskGenerator.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskGenerator.java
index 12ad34f..79311f4 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskGenerator.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskGenerator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.realtime_to_offline_segments;
+package org.apache.pinot.plugin.minion.tasks.realtimetoofflinesegments;
 
 import com.google.common.base.Preconditions;
 import java.util.ArrayList;
@@ -106,8 +106,7 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
         LOGGER.warn("Skip generating task: {} for non-REALTIME table: {}", taskType, realtimeTableName);
         continue;
       }
-      StreamConfig streamConfig =
-          new StreamConfig(realtimeTableName, IngestionConfigUtils.getStreamConfigMap(tableConfig));
+      StreamConfig streamConfig = new StreamConfig(realtimeTableName, IngestionConfigUtils.getStreamConfigMap(tableConfig));
       if (streamConfig.hasHighLevelConsumerType()) {
         LOGGER.warn("Skip generating task: {} for HLC REALTIME table: {}", taskType, realtimeTableName);
         continue;
@@ -115,12 +114,9 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
       LOGGER.info("Start generating task configs for table: {} for task: {}", realtimeTableName, taskType);
 
       // Only schedule 1 task of this type, per table
-      Map<String, TaskState> incompleteTasks =
-          TaskGeneratorUtils.getIncompleteTasks(taskType, realtimeTableName, _clusterInfoAccessor);
+      Map<String, TaskState> incompleteTasks = TaskGeneratorUtils.getIncompleteTasks(taskType, realtimeTableName, _clusterInfoAccessor);
       if (!incompleteTasks.isEmpty()) {
-        LOGGER
-            .warn("Found incomplete tasks: {} for same table: {}. Skipping task generation.", incompleteTasks.keySet(),
-                realtimeTableName);
+        LOGGER.warn("Found incomplete tasks: {} for same table: {}. Skipping task generation.", incompleteTasks.keySet(), realtimeTableName);
         continue;
       }
 
@@ -128,19 +124,15 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
       List<SegmentZKMetadata> completedSegmentsZKMetadata = new ArrayList<>();
       Map<Integer, String> partitionToLatestCompletedSegmentName = new HashMap<>();
       Set<Integer> allPartitions = new HashSet<>();
-      getCompletedSegmentsInfo(realtimeTableName, completedSegmentsZKMetadata, partitionToLatestCompletedSegmentName,
-          allPartitions);
+      getCompletedSegmentsInfo(realtimeTableName, completedSegmentsZKMetadata, partitionToLatestCompletedSegmentName, allPartitions);
       if (completedSegmentsZKMetadata.isEmpty()) {
-        LOGGER
-            .info("No realtime-completed segments found for table: {}, skipping task generation: {}", realtimeTableName,
-                taskType);
+        LOGGER.info("No realtime-completed segments found for table: {}, skipping task generation: {}", realtimeTableName, taskType);
         continue;
       }
       allPartitions.removeAll(partitionToLatestCompletedSegmentName.keySet());
       if (!allPartitions.isEmpty()) {
-        LOGGER
-            .info("Partitions: {} have no completed segments. Table: {} is not ready for {}. Skipping task generation.",
-                allPartitions, realtimeTableName, taskType);
+        LOGGER.info("Partitions: {} have no completed segments. Table: {} is not ready for {}. Skipping task generation.", allPartitions, realtimeTableName,
+            taskType);
         continue;
       }
 
@@ -150,10 +142,8 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
       Preconditions.checkState(taskConfigs != null, "Task config shouldn't be null for table: {}", realtimeTableName);
 
       // Get the bucket size and buffer
-      String bucketTimePeriod =
-          taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUCKET_TIME_PERIOD_KEY, DEFAULT_BUCKET_PERIOD);
-      String bufferTimePeriod =
-          taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUFFER_TIME_PERIOD_KEY, DEFAULT_BUFFER_PERIOD);
+      String bucketTimePeriod = taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUCKET_TIME_PERIOD_KEY, DEFAULT_BUCKET_PERIOD);
+      String bufferTimePeriod = taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUFFER_TIME_PERIOD_KEY, DEFAULT_BUFFER_PERIOD);
       long bucketMs = TimeUtils.convertPeriodToMillis(bucketTimePeriod);
       long bufferMs = TimeUtils.convertPeriodToMillis(bufferTimePeriod);
 
@@ -163,9 +153,8 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
 
       // Check that execution window is older than bufferTime
       if (windowEndMs > System.currentTimeMillis() - bufferMs) {
-        LOGGER.info(
-            "Window with start: {} and end: {} is not older than buffer time: {} configured as {} ago. Skipping task generation: {}",
-            windowStartMs, windowEndMs, bufferMs, bufferTimePeriod, taskType);
+        LOGGER.info("Window with start: {} and end: {} is not older than buffer time: {} configured as {} ago. Skipping task generation: {}", windowStartMs,
+            windowEndMs, bufferMs, bufferTimePeriod, taskType);
         continue;
       }
 
@@ -184,9 +173,7 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
           // If last completed segment is being used, make sure that segment crosses over end of window.
           // In the absence of this check, CONSUMING segments could contain some portion of the window. That data would be skipped forever.
           if (lastCompletedSegmentPerPartition.contains(segmentName) && segmentEndTimeMs < windowEndMs) {
-            LOGGER.info(
-                "Window data overflows into CONSUMING segments for partition of segment: {}. Skipping task generation: {}",
-                segmentName, taskType);
+            LOGGER.info("Window data overflows into CONSUMING segments for partition of segment: {}. Skipping task generation: {}", segmentName, taskType);
             skipGenerate = true;
             break;
           }
@@ -196,8 +183,7 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
       }
 
       if (segmentNames.isEmpty() || skipGenerate) {
-        LOGGER.info("Found no eligible segments for task: {} with window [{} - {}). Skipping task generation", taskType,
-            windowStartMs, windowEndMs);
+        LOGGER.info("Found no eligible segments for task: {} with window [{} - {}). Skipping task generation", taskType, windowStartMs, windowEndMs);
         continue;
       }
 
@@ -257,18 +243,17 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
 
       if (segmentZKMetadata.getStatus().equals(Segment.Realtime.Status.DONE)) {
         completedSegmentsZKMetadata.add(segmentZKMetadata);
-        latestLLCSegmentNameMap
-            .compute(llcSegmentName.getPartitionGroupId(), (partitionGroupId, latestLLCSegmentName) -> {
-              if (latestLLCSegmentName == null) {
-                return llcSegmentName;
-              } else {
-                if (llcSegmentName.getSequenceNumber() > latestLLCSegmentName.getSequenceNumber()) {
-                  return llcSegmentName;
-                } else {
-                  return latestLLCSegmentName;
-                }
-              }
-            });
+        latestLLCSegmentNameMap.compute(llcSegmentName.getPartitionGroupId(), (partitionGroupId, latestLLCSegmentName) -> {
+          if (latestLLCSegmentName == null) {
+            return llcSegmentName;
+          } else {
+            if (llcSegmentName.getSequenceNumber() > latestLLCSegmentName.getSequenceNumber()) {
+              return llcSegmentName;
+            } else {
+              return latestLLCSegmentName;
+            }
+          }
+        });
       }
     }
 
@@ -281,8 +266,7 @@ public class RealtimeToOfflineSegmentsTaskGenerator implements PinotTaskGenerato
    * Get the watermark from the RealtimeToOfflineSegmentsMetadata ZNode.
    * If the znode is null, computes the watermark using either the start time config or the start time from segment metadata
    */
-  private long getWatermarkMs(String realtimeTableName, List<SegmentZKMetadata> completedSegmentsZKMetadata,
-      long bucketMs) {
+  private long getWatermarkMs(String realtimeTableName, List<SegmentZKMetadata> completedSegmentsZKMetadata, long bucketMs) {
     RealtimeToOfflineSegmentsTaskMetadata realtimeToOfflineSegmentsTaskMetadata =
         _clusterInfoAccessor.getMinionRealtimeToOfflineSegmentsTaskMetadata(realtimeTableName);
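
The window arithmetic in the generator above reduces to: the next window starts at the stored watermark, spans one bucket, and becomes eligible only once the whole window is older than the buffer. A minimal sketch of that check, assuming bucket and buffer sizes are already in milliseconds (the real code derives them from "1d"-style periods via TimeUtils.convertPeriodToMillis):

    // Window eligibility check mirroring: if (windowEndMs > System.currentTimeMillis() - bufferMs) -> skip.
    public final class WindowEligibility {
      private WindowEligibility() {
      }

      static boolean isWindowReady(long watermarkMs, long bucketMs, long bufferMs, long nowMs) {
        long windowStartMs = watermarkMs;
        long windowEndMs = windowStartMs + bucketMs;
        return windowEndMs <= nowMs - bufferMs;
      }

      public static void main(String[] args) {
        long dayMs = 86_400_000L;
        long now = System.currentTimeMillis();
        System.out.println(isWindowReady(now - 3 * dayMs, dayMs, 2 * dayMs, now));  // true: window ended two days ago
        System.out.println(isWindowReady(now - dayMs, dayMs, 2 * dayMs, now));      // false: still inside the buffer
      }
    }
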
 
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushResult.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushResult.java
similarity index 94%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushResult.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushResult.java
index a28d1ee..cf6ca18 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushResult.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushResult.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.segment_generation_and_push;
+package org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -32,8 +32,7 @@ public class SegmentGenerationAndPushResult {
   private final Exception _exception;
   private final Map<String, Object> _customProperties;
 
-  private SegmentGenerationAndPushResult(boolean succeed, String segmentName, Exception exception,
-      Map<String, Object> customProperties) {
+  private SegmentGenerationAndPushResult(boolean succeed, String segmentName, Exception exception, Map<String, Object> customProperties) {
     _succeed = succeed;
     _segmentName = segmentName;
     _exception = exception;
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskExecutor.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskExecutor.java
similarity index 89%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskExecutor.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskExecutor.java
index 4487429..4d44f07 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskExecutor.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskExecutor.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.segment_generation_and_push;
+package org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush;
 
 import java.io.File;
 import java.io.IOException;
@@ -97,8 +97,8 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
   private static final long DEFAULT_PUSH_RETRY_INTERVAL_MILLIS = 1000L;
 
   @Override
-  protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(
-      PinotTaskConfig pinotTaskConfig, SegmentConversionResult segmentConversionResult) {
+  protected SegmentZKMetadataCustomMapModifier getSegmentZKMetadataCustomMapModifier(PinotTaskConfig pinotTaskConfig,
+      SegmentConversionResult segmentConversionResult) {
     throw new UnsupportedOperationException();
   }
 
@@ -108,8 +108,7 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     LOGGER.info("Executing SegmentGenerationAndPushTask with task config: {}", pinotTaskConfig);
     Map<String, String> taskConfigs = pinotTaskConfig.getConfigs();
     SegmentGenerationAndPushResult.Builder resultBuilder = new SegmentGenerationAndPushResult.Builder();
-    File localTempDir = new File(new File(MinionContext.getInstance().getDataDir(), "SegmentGenerationAndPushResult"),
-        "tmp-" + UUID.randomUUID());
+    File localTempDir = new File(new File(MinionContext.getInstance().getDataDir(), "SegmentGenerationAndPushResult"), "tmp-" + UUID.randomUUID());
     try {
       SegmentGenerationTaskSpec taskSpec = generateTaskSpec(taskConfigs, localTempDir);
       return generateAndPushSegment(taskSpec, resultBuilder, taskConfigs);
@@ -121,9 +120,9 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     }
   }
 
-  private SegmentGenerationAndPushResult generateAndPushSegment(SegmentGenerationTaskSpec taskSpec,
-      SegmentGenerationAndPushResult.Builder resultBuilder,
-      Map<String, String> taskConfigs) throws Exception {
+  private SegmentGenerationAndPushResult generateAndPushSegment(SegmentGenerationTaskSpec taskSpec, SegmentGenerationAndPushResult.Builder resultBuilder,
+      Map<String, String> taskConfigs)
+      throws Exception {
     // Generate Pinot Segment
     SegmentGenerationTaskRunner taskRunner = new SegmentGenerationTaskRunner(taskSpec);
     String segmentName = taskRunner.run();
@@ -166,8 +165,7 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     switch (BatchConfigProperties.SegmentPushType.valueOf(pushMode.toUpperCase())) {
       case TAR:
         try {
-          SegmentPushUtils.pushSegments(spec, SegmentGenerationAndPushTaskUtils.getLocalPinotFs(),
-              Arrays.asList(outputSegmentTarURI.toString()));
+          SegmentPushUtils.pushSegments(spec, SegmentGenerationAndPushTaskUtils.getLocalPinotFs(), Arrays.asList(outputSegmentTarURI.toString()));
         } catch (RetriableOperationException | AttemptsExceededException e) {
           throw new RuntimeException(e);
         }
@@ -176,8 +174,7 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
         try {
           List<String> segmentUris = new ArrayList<>();
           URI updatedURI = SegmentPushUtils
-              .generateSegmentTarURI(outputSegmentDirURI, outputSegmentTarURI, pushJobSpec.getSegmentUriPrefix(),
-                  pushJobSpec.getSegmentUriSuffix());
+              .generateSegmentTarURI(outputSegmentDirURI, outputSegmentTarURI, pushJobSpec.getSegmentUriPrefix(), pushJobSpec.getSegmentUriSuffix());
           segmentUris.add(updatedURI.toString());
           SegmentPushUtils.sendSegmentUris(spec, segmentUris);
         } catch (RetriableOperationException | AttemptsExceededException e) {
@@ -187,8 +184,8 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
       case METADATA:
         try {
           Map<String, String> segmentUriToTarPathMap = SegmentPushUtils
-              .getSegmentUriToTarPathMap(outputSegmentDirURI, pushJobSpec.getSegmentUriPrefix(),
-                  pushJobSpec.getSegmentUriSuffix(), new String[]{outputSegmentTarURI.toString()});
+              .getSegmentUriToTarPathMap(outputSegmentDirURI, pushJobSpec.getSegmentUriPrefix(), pushJobSpec.getSegmentUriSuffix(),
+                  new String[]{outputSegmentTarURI.toString()});
           SegmentPushUtils.sendSegmentUriAndMetadata(spec, outputFileFS, segmentUriToTarPathMap);
         } catch (RetriableOperationException | AttemptsExceededException e) {
           throw new RuntimeException(e);
@@ -199,8 +196,7 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     }
   }
 
-  private SegmentGenerationJobSpec generatePushJobSpec(String tableName, Map<String, String> taskConfigs,
-      PushJobSpec pushJobSpec) {
+  private SegmentGenerationJobSpec generatePushJobSpec(String tableName, Map<String, String> taskConfigs, PushJobSpec pushJobSpec) {
 
     TableSpec tableSpec = new TableSpec();
     tableSpec.setTableName(tableName);
@@ -224,8 +220,7 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     URI outputSegmentDirURI = URI.create(taskConfigs.get(BatchConfigProperties.OUTPUT_SEGMENT_DIR_URI));
     PinotFS outputFileFS = SegmentGenerationAndPushTaskUtils.getOutputPinotFS(taskConfigs, outputSegmentDirURI);
     URI outputSegmentTarURI = URI.create(outputSegmentDirURI + localSegmentTarFile.getName());
-    if (!Boolean.parseBoolean(taskConfigs.get(BatchConfigProperties.OVERWRITE_OUTPUT)) && outputFileFS
-        .exists(outputSegmentDirURI)) {
+    if (!Boolean.parseBoolean(taskConfigs.get(BatchConfigProperties.OVERWRITE_OUTPUT)) && outputFileFS.exists(outputSegmentDirURI)) {
       LOGGER.warn("Not overwrite existing output segment tar file: {}", outputFileFS.exists(outputSegmentDirURI));
     } else {
       outputFileFS.copyFromLocalFile(localSegmentTarFile, outputSegmentTarURI);
@@ -243,8 +238,8 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     TarGzCompressionUtils.createTarGzFile(localSegmentDir, localSegmentTarFile);
     long uncompressedSegmentSize = FileUtils.sizeOf(localSegmentDir);
     long compressedSegmentSize = FileUtils.sizeOf(localSegmentTarFile);
-    LOGGER.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName,
-        DataSizeUtils.fromBytes(uncompressedSegmentSize), DataSizeUtils.fromBytes(compressedSegmentSize));
+    LOGGER.info("Size for segment: {}, uncompressed: {}, compressed: {}", segmentName, DataSizeUtils.fromBytes(uncompressedSegmentSize),
+        DataSizeUtils.fromBytes(compressedSegmentSize));
     return localSegmentTarFile;
   }
 
@@ -276,8 +271,7 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     String tableNameWithType = taskConfigs.get(BatchConfigProperties.TABLE_NAME);
     Schema schema;
     if (taskConfigs.containsKey(BatchConfigProperties.SCHEMA)) {
-      schema = JsonUtils
-          .stringToObject(JsonUtils.objectToString(taskConfigs.get(BatchConfigProperties.SCHEMA)), Schema.class);
+      schema = JsonUtils.stringToObject(JsonUtils.objectToString(taskConfigs.get(BatchConfigProperties.SCHEMA)), Schema.class);
     } else if (taskConfigs.containsKey(BatchConfigProperties.SCHEMA_URI)) {
       schema = SegmentGenerationUtils.getSchema(taskConfigs.get(BatchConfigProperties.SCHEMA_URI), authToken);
     } else {
@@ -288,21 +282,18 @@ public class SegmentGenerationAndPushTaskExecutor extends BaseTaskExecutor {
     if (taskConfigs.containsKey(BatchConfigProperties.TABLE_CONFIGS)) {
       tableConfig = JsonUtils.stringToObject(taskConfigs.get(BatchConfigProperties.TABLE_CONFIGS), TableConfig.class);
     } else if (taskConfigs.containsKey(BatchConfigProperties.TABLE_CONFIGS_URI)) {
-      tableConfig =
-          SegmentGenerationUtils.getTableConfig(taskConfigs.get(BatchConfigProperties.TABLE_CONFIGS_URI), authToken);
+      tableConfig = SegmentGenerationUtils.getTableConfig(taskConfigs.get(BatchConfigProperties.TABLE_CONFIGS_URI), authToken);
     } else {
       tableConfig = getTableConfig(tableNameWithType);
     }
     taskSpec.setTableConfig(tableConfig);
     taskSpec.setSequenceId(Integer.parseInt(taskConfigs.get(BatchConfigProperties.SEQUENCE_ID)));
     if (taskConfigs.containsKey(BatchConfigProperties.FAIL_ON_EMPTY_SEGMENT)) {
-      taskSpec
-          .setFailOnEmptySegment(Boolean.parseBoolean(taskConfigs.get(BatchConfigProperties.FAIL_ON_EMPTY_SEGMENT)));
+      taskSpec.setFailOnEmptySegment(Boolean.parseBoolean(taskConfigs.get(BatchConfigProperties.FAIL_ON_EMPTY_SEGMENT)));
     }
     SegmentNameGeneratorSpec segmentNameGeneratorSpec = new SegmentNameGeneratorSpec();
     segmentNameGeneratorSpec.setType(taskConfigs.get(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE));
-    segmentNameGeneratorSpec.setConfigs(IngestionConfigUtils
-        .getConfigMapWithPrefix(taskConfigs, BatchConfigProperties.SEGMENT_NAME_GENERATOR_PROP_PREFIX));
+    segmentNameGeneratorSpec.setConfigs(IngestionConfigUtils.getConfigMapWithPrefix(taskConfigs, BatchConfigProperties.SEGMENT_NAME_GENERATOR_PROP_PREFIX));
     taskSpec.setSegmentNameGeneratorSpec(segmentNameGeneratorSpec);
     taskSpec.setCustomProperty(BatchConfigProperties.INPUT_DATA_FILE_URI_KEY, inputFileURI.toString());
     return taskSpec;
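
When the executor above builds its task spec, the schema and table config are each resolved in a fixed order: an inline value in the task configs wins, then a URI to fetch from, then the cluster's own metadata as the fallback. A plain-JDK sketch of that resolution order; the fetch placeholder and key names are assumptions standing in for the JsonUtils/SegmentGenerationUtils calls shown in the diff.

    import java.util.Map;
    import java.util.Optional;
    import java.util.function.Supplier;

    // Resolution chain: inline config -> URI fetch -> cluster fallback.
    public final class ConfigResolutionSketch {
      private ConfigResolutionSketch() {
      }

      static String resolve(Map<String, String> taskConfigs, String inlineKey, String uriKey,
          Supplier<String> clusterFallback) {
        if (taskConfigs.containsKey(inlineKey)) {
          return taskConfigs.get(inlineKey);  // inline JSON wins
        }
        return Optional.ofNullable(taskConfigs.get(uriKey))
            .map(uri -> "fetched-from:" + uri)  // placeholder for an actual URI fetch
            .orElseGet(clusterFallback);        // last resort: ask the cluster
      }

      public static void main(String[] args) {
        Map<String, String> configs = Map.of("schemaURI", "http://controller:9000/schemas/myTable");
        System.out.println(resolve(configs, "schema", "schemaURI", () -> "from-cluster"));
      }
    }
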
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskExecutorFactory.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskExecutorFactory.java
similarity index 95%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskExecutorFactory.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskExecutorFactory.java
index bd06f0c..383f4e0 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskExecutorFactory.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskExecutorFactory.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.segment_generation_and_push;
+package org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush;
 
 import org.apache.pinot.core.common.MinionConstants;
 import org.apache.pinot.minion.executor.MinionTaskZkMetadataManager;
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskGenerator.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskGenerator.java
similarity index 88%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskGenerator.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskGenerator.java
index b8c63bd..8993875 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskGenerator.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskGenerator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.segment_generation_and_push;
+package org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
@@ -87,8 +87,7 @@ import org.slf4j.LoggerFactory;
 @TaskGenerator
 public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator {
   private static final Logger LOGGER = LoggerFactory.getLogger(SegmentGenerationAndPushTaskGenerator.class);
-  private static final BatchConfigProperties.SegmentPushType DEFAULT_SEGMENT_PUSH_TYPE =
-      BatchConfigProperties.SegmentPushType.TAR;
+  private static final BatchConfigProperties.SegmentPushType DEFAULT_SEGMENT_PUSH_TYPE = BatchConfigProperties.SegmentPushType.TAR;
 
   private ClusterInfoAccessor _clusterInfoAccessor;
 
@@ -104,14 +103,13 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
 
   @Override
   public int getNumConcurrentTasksPerInstance() {
-    String numConcurrentTasksPerInstanceStr = _clusterInfoAccessor
-        .getClusterConfig(MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE);
+    String numConcurrentTasksPerInstanceStr =
+        _clusterInfoAccessor.getClusterConfig(MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE);
     if (numConcurrentTasksPerInstanceStr != null) {
       try {
         return Integer.parseInt(numConcurrentTasksPerInstanceStr);
       } catch (Exception e) {
-        LOGGER.error("Failed to parse cluster config: {}",
-            MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE, e);
+        LOGGER.error("Failed to parse cluster config: {}", MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE, e);
       }
     }
     return JobConfig.DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
@@ -131,8 +129,7 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
 
       TableTaskConfig tableTaskConfig = tableConfig.getTaskConfig();
       Preconditions.checkNotNull(tableTaskConfig);
-      Map<String, String> taskConfigs =
-          tableTaskConfig.getConfigsForTaskType(MinionConstants.SegmentGenerationAndPushTask.TASK_TYPE);
+      Map<String, String> taskConfigs = tableTaskConfig.getConfigsForTaskType(MinionConstants.SegmentGenerationAndPushTask.TASK_TYPE);
       Preconditions.checkNotNull(taskConfigs, "Task config shouldn't be null for Table: {}", offlineTableName);
 
       // Get max number of tasks for this table
@@ -159,8 +156,7 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
       List<Map<String, String>> batchConfigMaps = batchIngestionConfig.getBatchConfigMaps();
       for (Map<String, String> batchConfigMap : batchConfigMaps) {
         try {
-          URI inputDirURI =
-              SegmentGenerationUtils.getDirectoryURI(batchConfigMap.get(BatchConfigProperties.INPUT_DIR_URI));
+          URI inputDirURI = SegmentGenerationUtils.getDirectoryURI(batchConfigMap.get(BatchConfigProperties.INPUT_DIR_URI));
           updateRecordReaderConfigs(batchConfigMap);
           List<SegmentZKMetadata> segmentsZKMetadata = Collections.emptyList();
           // For append mode, we don't create segments for input file URIs already created.
@@ -170,17 +166,14 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
           Set<String> existingSegmentInputFiles = getExistingSegmentInputFiles(segmentsZKMetadata);
           Set<String> inputFilesFromRunningTasks = getInputFilesFromRunningTasks();
           existingSegmentInputFiles.addAll(inputFilesFromRunningTasks);
-          LOGGER.info("Trying to extract input files from path: {}, "
-                  + "and exclude input files from existing segments metadata: {}, "
-                  + "and input files from running tasks: {}", inputDirURI, existingSegmentInputFiles,
-              inputFilesFromRunningTasks);
+          LOGGER.info("Trying to extract input files from path: {}, " + "and exclude input files from existing segments metadata: {}, "
+              + "and input files from running tasks: {}", inputDirURI, existingSegmentInputFiles, inputFilesFromRunningTasks);
           List<URI> inputFileURIs = getInputFilesFromDirectory(batchConfigMap, inputDirURI, existingSegmentInputFiles);
           LOGGER.info("Final input files for task config generation: {}", inputFileURIs);
           for (URI inputFileURI : inputFileURIs) {
             Map<String, String> singleFileGenerationTaskConfig =
                 getSingleFileGenerationTaskConfig(offlineTableName, tableNumTasks, batchConfigMap, inputFileURI);
-            pinotTaskConfigs.add(new PinotTaskConfig(MinionConstants.SegmentGenerationAndPushTask.TASK_TYPE,
-                singleFileGenerationTaskConfig));
+            pinotTaskConfigs.add(new PinotTaskConfig(MinionConstants.SegmentGenerationAndPushTask.TASK_TYPE, singleFileGenerationTaskConfig));
             tableNumTasks++;
 
             // Generate up to tableMaxNumTasks tasks each time for each table
@@ -189,8 +182,7 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
             }
           }
         } catch (Exception e) {
-          LOGGER.error("Unable to generate the SegmentGenerationAndPush task. [ table configs: {}, task configs: {} ]",
-              tableConfig, taskConfigs, e);
+          LOGGER.error("Unable to generate the SegmentGenerationAndPush task. [ table configs: {}, task configs: {} ]", tableConfig, taskConfigs, e);
         }
       }
     }
@@ -199,8 +191,7 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
 
   private Set<String> getInputFilesFromRunningTasks() {
     Set<String> inputFilesFromRunningTasks = new HashSet<>();
-    Map<String, TaskState> taskStates =
-        _clusterInfoAccessor.getTaskStates(MinionConstants.SegmentGenerationAndPushTask.TASK_TYPE);
+    Map<String, TaskState> taskStates = _clusterInfoAccessor.getTaskStates(MinionConstants.SegmentGenerationAndPushTask.TASK_TYPE);
     for (String taskName : taskStates.keySet()) {
       switch (taskStates.get(taskName)) {
         case FAILED:
@@ -208,6 +199,8 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
         case STOPPED:
         case COMPLETED:
           continue;
+        default:
+          break;
       }
       List<PinotTaskConfig> taskConfigs = _clusterInfoAccessor.getTaskConfigs(taskName);
       for (PinotTaskConfig taskConfig : taskConfigs) {
@@ -222,8 +215,7 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
     return inputFilesFromRunningTasks;
   }
 
-  private Map<String, String> getSingleFileGenerationTaskConfig(String offlineTableName, int sequenceID,
-      Map<String, String> batchConfigMap, URI inputFileURI)
+  private Map<String, String> getSingleFileGenerationTaskConfig(String offlineTableName, int sequenceID, Map<String, String> batchConfigMap, URI inputFileURI)
       throws URISyntaxException {
 
     URI inputDirURI = SegmentGenerationUtils.getDirectoryURI(batchConfigMap.get(BatchConfigProperties.INPUT_DIR_URI));
@@ -234,16 +226,14 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
     String pushMode = IngestionConfigUtils.getPushMode(batchConfigMap);
 
     Map<String, String> singleFileGenerationTaskConfig = new HashMap<>(batchConfigMap);
-    singleFileGenerationTaskConfig
-        .put(BatchConfigProperties.TABLE_NAME, TableNameBuilder.OFFLINE.tableNameWithType(offlineTableName));
+    singleFileGenerationTaskConfig.put(BatchConfigProperties.TABLE_NAME, TableNameBuilder.OFFLINE.tableNameWithType(offlineTableName));
     singleFileGenerationTaskConfig.put(BatchConfigProperties.INPUT_DATA_FILE_URI_KEY, inputFileURI.toString());
     if (outputDirURI != null) {
       URI outputSegmentDirURI = SegmentGenerationUtils.getRelativeOutputPath(inputDirURI, inputFileURI, outputDirURI);
       singleFileGenerationTaskConfig.put(BatchConfigProperties.OUTPUT_SEGMENT_DIR_URI, outputSegmentDirURI.toString());
     }
     singleFileGenerationTaskConfig.put(BatchConfigProperties.SEQUENCE_ID, String.valueOf(sequenceID));
-    singleFileGenerationTaskConfig
-        .put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE, BatchConfigProperties.SegmentNameGeneratorType.SIMPLE);
+    singleFileGenerationTaskConfig.put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE, BatchConfigProperties.SegmentNameGeneratorType.SIMPLE);
     if ((outputDirURI == null) || (pushMode == null)) {
       singleFileGenerationTaskConfig.put(BatchConfigProperties.PUSH_MODE, DEFAULT_SEGMENT_PUSH_TYPE.toString());
     } else {
@@ -265,8 +255,7 @@ public class SegmentGenerationAndPushTaskGenerator implements PinotTaskGenerator
     }
   }
 
-  private List<URI> getInputFilesFromDirectory(Map<String, String> batchConfigMap, URI inputDirURI,
-      Set<String> existingSegmentInputFileURIs)
+  private List<URI> getInputFilesFromDirectory(Map<String, String> batchConfigMap, URI inputDirURI, Set<String> existingSegmentInputFileURIs)
       throws Exception {
     PinotFS inputDirFS = SegmentGenerationAndPushTaskUtils.getInputPinotFS(batchConfigMap, inputDirURI);
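
Each input file discovered by the generator above becomes its own task config map carrying the table name, the file URI, a sequence id, and a push mode that defaults to TAR when no output directory or push mode is configured. A simplified sketch of that per-file map; the plain string keys stand in for the BatchConfigProperties constants and TableNameBuilder handling used in the diff.

    import java.net.URI;
    import java.util.HashMap;
    import java.util.Map;

    // One config map per input file, with a TAR default when output dir or push mode is missing.
    public final class SingleFileTaskConfigSketch {
      private SingleFileTaskConfigSketch() {
      }

      static Map<String, String> build(String offlineTableName, int sequenceId, URI inputFileURI,
          String outputDirURI, String pushMode) {
        Map<String, String> config = new HashMap<>();
        config.put("tableName", offlineTableName);  // real code routes this through TableNameBuilder.OFFLINE
        config.put("input.data.file.uri", inputFileURI.toString());
        config.put("sequenceId", String.valueOf(sequenceId));
        config.put("push.mode", (outputDirURI == null || pushMode == null) ? "TAR" : pushMode);
        return config;
      }

      public static void main(String[] args) {
        System.out.println(build("myTable", 0, URI.create("s3://bucket/input/part-0.avro"), null, null));
      }
    }
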
 
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskUtils.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskUtils.java
similarity index 95%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskUtils.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskUtils.java
index bf95143..1927e83 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskUtils.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskUtils.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.segment_generation_and_push;
+package org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush;
 
 import java.net.URI;
 import java.util.Map;
@@ -30,6 +30,8 @@ import org.apache.pinot.spi.utils.IngestionConfigUtils;
 
 
 public class SegmentGenerationAndPushTaskUtils {
+  private SegmentGenerationAndPushTaskUtils() {
+  }
 
   private static final PinotFS LOCAL_PINOT_FS = new LocalPinotFS();
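
Apart from the package rename, the visible change in this file is the added private constructor, which satisfies the utility-class style rule: a class that only exposes static helpers and a static PinotFS field should not be instantiable. A standalone illustration of the convention:

    // Utility-class convention: final class, private constructor, static members only.
    public final class MathUtils {
      private MathUtils() {
        // prevent instantiation; the class is just a namespace for static helpers
      }

      public static long clampToNonNegative(long value) {
        return Math.max(0L, value);
      }
    }
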
 
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java
index 0139392..319fca5 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java
@@ -46,10 +46,8 @@ public class MergeTaskUtilsTest {
 
   @Test
   public void testGetTimeHandlerConfig() {
-    TableConfig tableConfig =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").setTimeColumnName("dateTime").build();
-    Schema schema = new Schema.SchemaBuilder()
-        .addDateTime("dateTime", DataType.LONG, "1:SECONDS:SIMPLE_DATE_FORMAT:yyyyMMddHHmmss", "1:SECONDS").build();
+    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").setTimeColumnName("dateTime").build();
+    Schema schema = new Schema.SchemaBuilder().addDateTime("dateTime", DataType.LONG, "1:SECONDS:SIMPLE_DATE_FORMAT:yyyyMMddHHmmss", "1:SECONDS").build();
     Map<String, String> taskConfig = new HashMap<>();
     long expectedWindowStartMs = 1625097600000L;
     long expectedWindowEndMs = 1625184000000L;
@@ -69,8 +67,7 @@ public class MergeTaskUtilsTest {
     assertEquals(timeHandlerConfig.getPartitionBucketMs(), expectedPartitionBucketMs);
 
     // No time column in table config
-    TableConfig tableConfigWithoutTimeColumn =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build();
+    TableConfig tableConfigWithoutTimeColumn = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build();
     assertNull(MergeTaskUtils.getTimeHandlerConfig(tableConfigWithoutTimeColumn, schema, taskConfig));
 
     // Time column does not exist in schema
@@ -86,9 +83,7 @@ public class MergeTaskUtilsTest {
   @Test
   public void testGetPartitionerConfigs() {
     TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable")
-        .setSegmentPartitionConfig(
-            new SegmentPartitionConfig(Collections.singletonMap("memberId", new ColumnPartitionConfig("murmur", 10))))
-        .build();
+        .setSegmentPartitionConfig(new SegmentPartitionConfig(Collections.singletonMap("memberId", new ColumnPartitionConfig("murmur", 10)))).build();
     Schema schema = new Schema.SchemaBuilder().addSingleValueDimension("memberId", DataType.LONG).build();
     Map<String, String> taskConfig = Collections.emptyMap();
 
@@ -102,8 +97,7 @@ public class MergeTaskUtilsTest {
     assertEquals(columnPartitionConfig.getNumPartitions(), 10);
 
     // No partition column in table config
-    TableConfig tableConfigWithoutPartitionColumn =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build();
+    TableConfig tableConfigWithoutPartitionColumn = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build();
     assertTrue(MergeTaskUtils.getPartitionerConfigs(tableConfigWithoutPartitionColumn, schema, taskConfig).isEmpty());
 
     // Partition column does not exist in schema
@@ -118,12 +112,9 @@ public class MergeTaskUtilsTest {
 
   @Test
   public void testGetMergeType() {
-    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "concat")),
-        MergeType.CONCAT);
-    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "Rollup")),
-        MergeType.ROLLUP);
-    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "DeDuP")),
-        MergeType.DEDUP);
+    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "concat")), MergeType.CONCAT);
+    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "Rollup")), MergeType.ROLLUP);
+    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "DeDuP")), MergeType.DEDUP);
     assertNull(MergeTaskUtils.getMergeType(Collections.emptyMap()));
 
     try {
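
The reflowed assertions above exercise case-insensitive merge-type parsing ("concat", "Rollup", "DeDuP") and a null result for an empty config. A minimal sketch of that behaviour; the enum and helper below are illustrative stand-ins, not the actual MergeTaskUtils.getMergeType implementation.

    import java.util.Map;

    // Case-insensitive enum lookup that returns null when the key is absent and
    // throws IllegalArgumentException for unknown values.
    public final class MergeTypeLookupSketch {
      private MergeTypeLookupSketch() {
      }

      enum MergeType {
        CONCAT, ROLLUP, DEDUP
      }

      static MergeType getMergeType(Map<String, String> taskConfig, String key) {
        String value = taskConfig.get(key);
        return value == null ? null : MergeType.valueOf(value.toUpperCase());
      }

      public static void main(String[] args) {
        System.out.println(getMergeType(Map.of("mergeType", "DeDuP"), "mergeType"));  // DEDUP
        System.out.println(getMergeType(Map.of(), "mergeType"));                      // null
      }
    }
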
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/TaskRegistryTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/TaskRegistryTest.java
index c369b33..667625a 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/TaskRegistryTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/TaskRegistryTest.java
@@ -21,15 +21,15 @@ package org.apache.pinot.plugin.minion.tasks;
 import java.util.Set;
 import org.apache.pinot.controller.helix.core.minion.generator.TaskGeneratorRegistry;
 import org.apache.pinot.minion.executor.TaskExecutorFactoryRegistry;
-import org.apache.pinot.plugin.minion.tasks.convert_to_raw_index.ConvertToRawIndexTaskExecutorFactory;
-import org.apache.pinot.plugin.minion.tasks.convert_to_raw_index.ConvertToRawIndexTaskGenerator;
-import org.apache.pinot.plugin.minion.tasks.merge_rollup.MergeRollupTaskExecutorFactory;
-import org.apache.pinot.plugin.minion.tasks.merge_rollup.MergeRollupTaskGenerator;
+import org.apache.pinot.plugin.minion.tasks.converttorawindex.ConvertToRawIndexTaskExecutorFactory;
+import org.apache.pinot.plugin.minion.tasks.converttorawindex.ConvertToRawIndexTaskGenerator;
+import org.apache.pinot.plugin.minion.tasks.mergerollup.MergeRollupTaskExecutorFactory;
+import org.apache.pinot.plugin.minion.tasks.mergerollup.MergeRollupTaskGenerator;
 import org.apache.pinot.plugin.minion.tasks.purge.PurgeTaskExecutorFactory;
-import org.apache.pinot.plugin.minion.tasks.realtime_to_offline_segments.RealtimeToOfflineSegmentsTaskExecutorFactory;
-import org.apache.pinot.plugin.minion.tasks.realtime_to_offline_segments.RealtimeToOfflineSegmentsTaskGenerator;
-import org.apache.pinot.plugin.minion.tasks.segment_generation_and_push.SegmentGenerationAndPushTaskExecutorFactory;
-import org.apache.pinot.plugin.minion.tasks.segment_generation_and_push.SegmentGenerationAndPushTaskGenerator;
+import org.apache.pinot.plugin.minion.tasks.realtimetoofflinesegments.RealtimeToOfflineSegmentsTaskExecutorFactory;
+import org.apache.pinot.plugin.minion.tasks.realtimetoofflinesegments.RealtimeToOfflineSegmentsTaskGenerator;
+import org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush.SegmentGenerationAndPushTaskExecutorFactory;
+import org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush.SegmentGenerationAndPushTaskGenerator;
 import org.testng.Assert;
 import org.testng.annotations.Test;
 
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutorTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutorTest.java
similarity index 95%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutorTest.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutorTest.java
index 5cf3afc..80abbf6 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskExecutorTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskExecutorTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.merge_rollup;
+package org.apache.pinot.plugin.minion.tasks.mergerollup;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -96,8 +96,7 @@ public class MergeRollupTaskExecutorTest {
     ZkHelixPropertyStore<ZNRecord> helixPropertyStore = Mockito.mock(ZkHelixPropertyStore.class);
     Mockito.when(helixPropertyStore.get("/CONFIGS/TABLE/testTable_OFFLINE", null, AccessOption.PERSISTENT))
         .thenReturn(TableConfigUtils.toZNRecord(tableConfig));
-    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTable", null, AccessOption.PERSISTENT))
-        .thenReturn(SchemaUtils.toZNRecord(schema));
+    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTable", null, AccessOption.PERSISTENT)).thenReturn(SchemaUtils.toZNRecord(schema));
     minionContext.setHelixPropertyStore(helixPropertyStore);
   }
 
@@ -110,8 +109,7 @@ public class MergeRollupTaskExecutorTest {
     configs.put(MinionConstants.MergeRollupTask.MERGE_LEVEL_KEY, "daily");
 
     PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, configs);
-    List<SegmentConversionResult> conversionResults =
-        mergeRollupTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = mergeRollupTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
 
     Assert.assertEquals(conversionResults.size(), 1);
     Assert.assertEquals(conversionResults.get(0).getSegmentName(), MERGED_SEGMENT_NAME);
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskGeneratorTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskGeneratorTest.java
similarity index 70%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskGeneratorTest.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskGeneratorTest.java
index 5bcd3ce..828f516 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskGeneratorTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskGeneratorTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.merge_rollup;
+package org.apache.pinot.plugin.minion.tasks.mergerollup;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
@@ -96,29 +96,25 @@ public class MergeRollupTaskGeneratorTest {
     assertTrue(pinotTaskConfigs.isEmpty());
 
     // Skip task generation, if REFRESH table
-    IngestionConfig ingestionConfig =
-        new IngestionConfig(new BatchIngestionConfig(null, "REFRESH", null), null, null, null, null);
+    IngestionConfig ingestionConfig = new IngestionConfig(new BatchIngestionConfig(null, "REFRESH", null), null, null, null, null);
     offlineTableConfig = getOfflineTableConfig(new HashMap<>());
     offlineTableConfig.setIngestionConfig(ingestionConfig);
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
     assertTrue(pinotTaskConfigs.isEmpty());
   }
 
-  private void checkPinotTaskConfig(Map<String, String> pinotTaskConfig, String segments, String mergeLevel,
-      String mergeType, String partitionBucketTimePeriod, String roundBucketTimePeriod,
-      String maxNumRecordsPerSegments) {
+  private void checkPinotTaskConfig(Map<String, String> pinotTaskConfig, String segments, String mergeLevel, String mergeType, String partitionBucketTimePeriod,
+      String roundBucketTimePeriod, String maxNumRecordsPerSegments) {
     assertEquals(pinotTaskConfig.get(MinionConstants.TABLE_NAME_KEY), OFFLINE_TABLE_NAME);
     assertEquals(pinotTaskConfig.get(MinionConstants.SEGMENT_NAME_KEY), segments);
     assertTrue("true".equalsIgnoreCase(pinotTaskConfig.get(MinionConstants.ENABLE_REPLACE_SEGMENTS_KEY)));
     assertEquals(pinotTaskConfig.get(MinionConstants.MergeRollupTask.MERGE_LEVEL_KEY), mergeLevel);
     assertEquals(pinotTaskConfig.get(MinionConstants.MergeRollupTask.MERGE_TYPE_KEY), mergeType);
-    assertEquals(pinotTaskConfig.get(MinionConstants.MergeTask.PARTITION_BUCKET_TIME_PERIOD_KEY),
-        partitionBucketTimePeriod);
+    assertEquals(pinotTaskConfig.get(MinionConstants.MergeTask.PARTITION_BUCKET_TIME_PERIOD_KEY), partitionBucketTimePeriod);
     assertEquals(pinotTaskConfig.get(MinionConstants.MergeTask.ROUND_BUCKET_TIME_PERIOD_KEY), roundBucketTimePeriod);
-    assertEquals(pinotTaskConfig.get(MinionConstants.MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY),
-        maxNumRecordsPerSegments);
-    assertTrue(pinotTaskConfig.get(MinionConstants.MergeRollupTask.SEGMENT_NAME_PREFIX_KEY)
-        .startsWith(MinionConstants.MergeRollupTask.MERGED_SEGMENT_NAME_PREFIX));
+    assertEquals(pinotTaskConfig.get(MinionConstants.MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY), maxNumRecordsPerSegments);
+    assertTrue(
+        pinotTaskConfig.get(MinionConstants.MergeRollupTask.SEGMENT_NAME_PREFIX_KEY).startsWith(MinionConstants.MergeRollupTask.MERGED_SEGMENT_NAME_PREFIX));
   }
 
   private void mockMergeRollupTaskMetadataGetterAndSetter(ClusterInfoAccessor mockClusterInfoProvide) {
@@ -158,8 +154,7 @@ public class MergeRollupTaskGeneratorTest {
     taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
     TableConfig offlineTableConfig = getOfflineTableConfig(taskConfigsMap);
     ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(Collections.emptyList()));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(Collections.emptyList()));
     mockMergeRollupTaskMetadataGetterAndSetter(mockClusterInfoProvide);
 
     MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
@@ -186,8 +181,7 @@ public class MergeRollupTaskGeneratorTest {
 
     String segmentName1 = "testTable__1";
     long currentTime = System.currentTimeMillis();
-    SegmentZKMetadata metadata1 =
-        getSegmentZKMetadata(segmentName1, currentTime - 500_000L, currentTime, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, currentTime - 500_000L, currentTime, TimeUnit.MILLISECONDS, null);
     metadata1.setTotalDocs(0);
     when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1));
 
@@ -215,8 +209,7 @@ public class MergeRollupTaskGeneratorTest {
 
     String segmentName1 = "testTable__1";
     long currentTime = System.currentTimeMillis();
-    SegmentZKMetadata metadata1 =
-        getSegmentZKMetadata(segmentName1, currentTime - 500_000L, currentTime, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, currentTime - 500_000L, currentTime, TimeUnit.MILLISECONDS, null);
     when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1));
 
     MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
@@ -243,35 +236,28 @@ public class MergeRollupTaskGeneratorTest {
 
     String segmentName1 = "testTable__1";
     String segmentName2 = "testTable__2";
-    SegmentZKMetadata metadata1 =
-        getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, "download1");
+    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, "download1");
     metadata1.setTotalDocs(2000000L);
-    SegmentZKMetadata metadata2 =
-        getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS, "download2");
+    SegmentZKMetadata metadata2 = getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS, "download2");
     metadata2.setTotalDocs(4000000L);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(metadata1, metadata2));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1, metadata2));
 
     // Single task
     MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
     generator.init(mockClusterInfoProvide);
     List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
     assertEquals(pinotTaskConfigs.size(), 1);
-    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat",
-        "1d", null, "1000000");
+    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat", "1d", null, "1000000");
     assertEquals(pinotTaskConfigs.get(0).getConfigs().get(MinionConstants.DOWNLOAD_URL_KEY), "download1,download2");
 
     // Multiple tasks
     String segmentName3 = "testTable__3";
-    SegmentZKMetadata metadata3 =
-        getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata metadata3 = getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
     metadata3.setTotalDocs(5000000L);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(metadata1, metadata2, metadata3));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1, metadata2, metadata3));
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
     assertEquals(pinotTaskConfigs.size(), 2);
-    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat",
-        "1d", null, "1000000");
+    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat", "1d", null, "1000000");
     checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName3, "daily", "concat", "1d", null, "1000000");
   }
 
@@ -287,44 +273,35 @@ public class MergeRollupTaskGeneratorTest {
     tableTaskConfigs.put("daily.bucketTimePeriod", "1d");
     tableTaskConfigs.put("daily.maxNumRecordsPerSegment", "1000000");
     taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
-    TableConfig offlineTableConfig =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
-            .setSegmentPartitionConfig(new SegmentPartitionConfig(
-                Collections.singletonMap("memberId", new ColumnPartitionConfig("murmur", 10))))
-            .setTaskConfig(new TableTaskConfig(taskConfigsMap)).build();
+    TableConfig offlineTableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
+        .setSegmentPartitionConfig(new SegmentPartitionConfig(Collections.singletonMap("memberId", new ColumnPartitionConfig("murmur", 10))))
+        .setTaskConfig(new TableTaskConfig(taskConfigsMap)).build();
 
     String segmentName1 = "testTable__1";
     String segmentName2 = "testTable__2";
     String segmentName3 = "testTable__3";
     String segmentName4 = "testTable__4";
-    SegmentZKMetadata metadata1 =
-        getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, null);
-    metadata1.setPartitionMetadata(new SegmentPartitionMetadata(
-        Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(0)))));
-    SegmentZKMetadata metadata2 =
-        getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
-    metadata2.setPartitionMetadata(new SegmentPartitionMetadata(
-        Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(0)))));
-    SegmentZKMetadata metadata3 =
-        getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
-    metadata3.setPartitionMetadata(new SegmentPartitionMetadata(
-        Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(1)))));
-    SegmentZKMetadata metadata4 =
-        getSegmentZKMetadata(segmentName4, 90_000_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
-    metadata4.setPartitionMetadata(new SegmentPartitionMetadata(
-        Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(1)))));
+    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, null);
+    metadata1.setPartitionMetadata(
+        new SegmentPartitionMetadata(Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(0)))));
+    SegmentZKMetadata metadata2 = getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
+    metadata2.setPartitionMetadata(
+        new SegmentPartitionMetadata(Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(0)))));
+    SegmentZKMetadata metadata3 = getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
+    metadata3.setPartitionMetadata(
+        new SegmentPartitionMetadata(Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(1)))));
+    SegmentZKMetadata metadata4 = getSegmentZKMetadata(segmentName4, 90_000_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
+    metadata4.setPartitionMetadata(
+        new SegmentPartitionMetadata(Collections.singletonMap("memberId", new ColumnPartitionMetadata("murmur", 10, Collections.singleton(1)))));
     ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(metadata1, metadata2, metadata3, metadata4));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1, metadata2, metadata3, metadata4));
 
     MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
     generator.init(mockClusterInfoProvide);
     List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
     assertEquals(pinotTaskConfigs.size(), 2);
-    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat",
-        "1d", null, "1000000");
-    checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName3 + "," + segmentName4, "daily", "concat",
-        "1d", null, "1000000");
+    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat", "1d", null, "1000000");
+    checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName3 + "," + segmentName4, "daily", "concat", "1d", null, "1000000");
 
     // With numMaxRecordsPerTask constraints
     tableTaskConfigs.put("daily.maxNumRecordsPerTask", "5000000");
@@ -335,8 +312,7 @@ public class MergeRollupTaskGeneratorTest {
 
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
     assertEquals(pinotTaskConfigs.size(), 3);
-    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat",
-        "1d", null, "1000000");
+    checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, "daily", "concat", "1d", null, "1000000");
     checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName3, "daily", "concat", "1d", null, "1000000");
     checkPinotTaskConfig(pinotTaskConfigs.get(2).getConfigs(), segmentName4, "daily", "concat", "1d", null, "1000000");
   }
@@ -358,47 +334,39 @@ public class MergeRollupTaskGeneratorTest {
 
     String segmentName1 = "testTable__1";
     String segmentName2 = "testTable__2";
-    SegmentZKMetadata metadata1 =
-        getSegmentZKMetadata(segmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
-    SegmentZKMetadata metadata2 =
-        getSegmentZKMetadata(segmentName2, 345_600_000L, 400_000_000L, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata metadata2 = getSegmentZKMetadata(segmentName2, 345_600_000L, 400_000_000L, TimeUnit.MILLISECONDS, null);
     ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(metadata1, metadata2));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1, metadata2));
     mockMergeRollupTaskMetadataGetterAndSetter(mockClusterInfoProvide);
 
     // Cold start, set watermark to smallest segment metadata start time round off to the nearest bucket boundary
     MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
     generator.init(mockClusterInfoProvide);
     List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(DAILY).longValue(), 86_400_000L);
+    assertEquals(MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY)
+        .longValue(), 86_400_000L);
     assertEquals(pinotTaskConfigs.size(), 1);
     checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1, "daily", "concat", "1d", null, "1000000");
 
     // Bump up watermark to the merge task execution window start time
     TreeMap<String, Long> waterMarkMap = new TreeMap<>();
     waterMarkMap.put(DAILY, 86_400_000L);
-    mockClusterInfoProvide
-        .setMergeRollupTaskMetadata(new MergeRollupTaskMetadata(OFFLINE_TABLE_NAME, waterMarkMap), -1);
+    mockClusterInfoProvide.setMergeRollupTaskMetadata(new MergeRollupTaskMetadata(OFFLINE_TABLE_NAME, waterMarkMap), -1);
     metadata1.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(DAILY).longValue(), 345_600_000L);
+    assertEquals(MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY)
+        .longValue(), 345_600_000L);
     assertEquals(pinotTaskConfigs.size(), 1);
     checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName2, "daily", "concat", "1d", null, "1000000");
 
     // Not updating watermark if there's no unmerged segments
     waterMarkMap.put(DAILY, 345_600_000L);
-    mockClusterInfoProvide
-        .setMergeRollupTaskMetadata(new MergeRollupTaskMetadata(OFFLINE_TABLE_NAME, waterMarkMap), -1);
+    mockClusterInfoProvide.setMergeRollupTaskMetadata(new MergeRollupTaskMetadata(OFFLINE_TABLE_NAME, waterMarkMap), -1);
     metadata2.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(DAILY).longValue(), 345_600_000L);
+    assertEquals(MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY)
+        .longValue(), 345_600_000L);
     assertEquals(pinotTaskConfigs.size(), 0);
   }
 
@@ -420,12 +388,9 @@ public class MergeRollupTaskGeneratorTest {
     String segmentName1 = "testTable__1";
     String segmentName2 = "testTable__2";
     String mergedSegmentName1 = "merged_testTable__1";
-    SegmentZKMetadata metadata1 =
-        getSegmentZKMetadata(segmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
-    SegmentZKMetadata metadata2 =
-        getSegmentZKMetadata(segmentName2, 345_600_000L, 400_000_000L, TimeUnit.MILLISECONDS, null);
-    SegmentZKMetadata merged_metadata1 =
-        getSegmentZKMetadata(mergedSegmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata metadata2 = getSegmentZKMetadata(segmentName2, 345_600_000L, 400_000_000L, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata mergedMetadata1 = getSegmentZKMetadata(mergedSegmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
     ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
     Map<String, Long> waterMarkMap = new TreeMap<>();
     waterMarkMap.put(DAILY, 86_400_000L);
@@ -443,8 +408,7 @@ public class MergeRollupTaskGeneratorTest {
         .thenReturn(Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigs)));
 
     // If same task and table, IN_PROGRESS, then don't generate again
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(metadata1, metadata2));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1, metadata2));
     taskStatesMap.put(taskName, TaskState.IN_PROGRESS);
     MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
     generator.init(mockClusterInfoProvide);
@@ -452,8 +416,7 @@ public class MergeRollupTaskGeneratorTest {
     assertTrue(pinotTaskConfigs.isEmpty());
 
     // If same task and table, IN_PROGRESS, but older than 1 day, generate
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(metadata1, metadata2));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1, metadata2));
     String oldTaskName = "Task_MergeRollupTask_" + (System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3));
     taskStatesMap.remove(taskName);
     taskStatesMap.put(oldTaskName, TaskState.IN_PROGRESS);
@@ -462,14 +425,11 @@ public class MergeRollupTaskGeneratorTest {
     checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1, "daily", "concat", "1d", null, "1000000");
 
     // If same task and table, but COMPLETED, generate
-    merged_metadata1
-        .setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(metadata1, metadata2, merged_metadata1));
+    mergedMetadata1.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1, metadata2, mergedMetadata1));
     SegmentLineage segmentLineage = new SegmentLineage(OFFLINE_TABLE_NAME);
     segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
-        new LineageEntry(Collections.singletonList(segmentName1), Collections.singletonList(mergedSegmentName1),
-            LineageEntryState.COMPLETED, 11111L));
+        new LineageEntry(Collections.singletonList(segmentName1), Collections.singletonList(mergedSegmentName1), LineageEntryState.COMPLETED, 11111L));
     when(mockClusterInfoProvide.getSegmentLineage(OFFLINE_TABLE_NAME)).thenReturn(segmentLineage);
     taskStatesMap.put(taskName, TaskState.COMPLETED);
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
@@ -505,18 +465,11 @@ public class MergeRollupTaskGeneratorTest {
     String segmentName3 = "testTable__3";
     String segmentName4 = "testTable__4";
     String segmentName5 = "testTable__5";
-    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS,
-        null); // starts 1 day since epoch
-    SegmentZKMetadata metadata2 = getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS,
-        null); // starts 1 day since epoch
-    SegmentZKMetadata metadata3 = getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS,
-        null); // starts 1 day since epoch
-    SegmentZKMetadata metadata4 =
-        getSegmentZKMetadata(segmentName4, 2_505_600_000L, 2_592_010_000L, TimeUnit.MILLISECONDS,
-            null); // starts 29 days since epoch
-    SegmentZKMetadata metadata5 =
-        getSegmentZKMetadata(segmentName5, 2_592_000_000L, 2_592_020_000L, TimeUnit.MILLISECONDS,
-            null); // starts 30 days since epoch
+    SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, null); // starts 1 day since epoch
+    SegmentZKMetadata metadata2 = getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS, null); // starts 1 day since epoch
+    SegmentZKMetadata metadata3 = getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null); // starts 1 day since epoch
+    SegmentZKMetadata metadata4 = getSegmentZKMetadata(segmentName4, 2_505_600_000L, 2_592_010_000L, TimeUnit.MILLISECONDS, null); // starts 29 days since epoch
+    SegmentZKMetadata metadata5 = getSegmentZKMetadata(segmentName5, 2_592_000_000L, 2_592_020_000L, TimeUnit.MILLISECONDS, null); // starts 30 days since epoch
     ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
     when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
         .thenReturn(Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5));
@@ -526,130 +479,111 @@ public class MergeRollupTaskGeneratorTest {
     MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
     generator.init(mockClusterInfoProvide);
     List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(DAILY).longValue(), 86_400_000L);
+    assertEquals(MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY)
+        .longValue(), 86_400_000L);
     assertEquals(pinotTaskConfigs.size(), 1);
     Map<String, String> taskConfigsDaily1 = pinotTaskConfigs.get(0).getConfigs();
-    checkPinotTaskConfig(taskConfigsDaily1, segmentName1 + "," + segmentName2 + "," + segmentName3, "daily", "concat",
-        "1d", null, "1000000");
+    checkPinotTaskConfig(taskConfigsDaily1, segmentName1 + "," + segmentName2 + "," + segmentName3, "daily", "concat", "1d", null, "1000000");
 
     // Monthly task is not scheduled until there are 30 days daily merged segments available (monthly merge window endTimeMs > daily watermark)
     String segmentNameMergedDaily1 = "merged_testTable__1__2__3";
-    SegmentZKMetadata metadataMergedDaily1 =
-        getSegmentZKMetadata(segmentNameMergedDaily1, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
-    metadataMergedDaily1
-        .setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
+    SegmentZKMetadata metadataMergedDaily1 = getSegmentZKMetadata(segmentNameMergedDaily1, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
+    metadataMergedDaily1.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
     when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME))
         .thenReturn(Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1));
 
     SegmentLineage segmentLineage = new SegmentLineage(OFFLINE_TABLE_NAME);
     segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
-        new LineageEntry(Arrays.asList(segmentName1, segmentName2, segmentName3),
-            Collections.singletonList(segmentNameMergedDaily1), LineageEntryState.COMPLETED, 11111L));
+        new LineageEntry(Arrays.asList(segmentName1, segmentName2, segmentName3), Collections.singletonList(segmentNameMergedDaily1),
+            LineageEntryState.COMPLETED, 11111L));
     when(mockClusterInfoProvide.getSegmentLineage(OFFLINE_TABLE_NAME)).thenReturn(segmentLineage);
 
     Map<String, TaskState> taskStatesMap = new HashMap<>();
     String taskName1 = "Task_MergeRollupTask_1";
     taskStatesMap.put(taskName1, TaskState.COMPLETED);
     when(mockClusterInfoProvide.getTaskStates(MinionConstants.MergeRollupTask.TASK_TYPE)).thenReturn(taskStatesMap);
-    when(mockClusterInfoProvide.getTaskConfigs(taskName1)).thenReturn(
-        Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily1)));
+    when(mockClusterInfoProvide.getTaskConfigs(taskName1))
+        .thenReturn(Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily1)));
 
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
 
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(DAILY).longValue(), 2_505_600_000L);
+    assertEquals(MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY)
+        .longValue(), 2_505_600_000L);
     assertEquals(pinotTaskConfigs.size(), 1);
     Map<String, String> taskConfigsDaily2 = pinotTaskConfigs.get(0).getConfigs();
     checkPinotTaskConfig(taskConfigsDaily2, segmentName4, "daily", "concat", "1d", null, "1000000");
 
     // Schedule multiple tasks for both merge levels simultaneously
     String segmentNameMergedDaily2 = "merged_testTable__4_1";
-    SegmentZKMetadata metadataMergedDaily2 =
-        getSegmentZKMetadata(segmentNameMergedDaily2, 2_505_600_000L, 2_591_999_999L, TimeUnit.MILLISECONDS, null);
-    metadataMergedDaily2
-        .setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
+    SegmentZKMetadata metadataMergedDaily2 = getSegmentZKMetadata(segmentNameMergedDaily2, 2_505_600_000L, 2_591_999_999L, TimeUnit.MILLISECONDS, null);
+    metadataMergedDaily2.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
     String segmentNameMergedDaily3 = "merged_testTable__4_2";
-    SegmentZKMetadata metadataMergedDaily3 =
-        getSegmentZKMetadata(segmentNameMergedDaily3, 2_592_000_000L, 2_592_010_000L, TimeUnit.MILLISECONDS, null);
-    metadataMergedDaily3
-        .setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists
-        .newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1, metadataMergedDaily2,
-            metadataMergedDaily3));
+    SegmentZKMetadata metadataMergedDaily3 = getSegmentZKMetadata(segmentNameMergedDaily3, 2_592_000_000L, 2_592_010_000L, TimeUnit.MILLISECONDS, null);
+    metadataMergedDaily3.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
+        Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1, metadataMergedDaily2, metadataMergedDaily3));
 
     segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
-        new LineageEntry(Collections.singletonList(segmentName4),
-            Arrays.asList(segmentNameMergedDaily1, segmentNameMergedDaily2), LineageEntryState.COMPLETED, 11111L));
+        new LineageEntry(Collections.singletonList(segmentName4), Arrays.asList(segmentNameMergedDaily1, segmentNameMergedDaily2), LineageEntryState.COMPLETED,
+            11111L));
 
     String taskName2 = "Task_MergeRollupTask_2";
     taskStatesMap.put(taskName2, TaskState.COMPLETED);
-    when(mockClusterInfoProvide.getTaskConfigs(taskName2)).thenReturn(
-        Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily2)));
+    when(mockClusterInfoProvide.getTaskConfigs(taskName2))
+        .thenReturn(Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily2)));
 
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
 
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(DAILY).longValue(), 2_592_000_000L);
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(MONTHLY).longValue(), 0L);
+    assertEquals(MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY)
+        .longValue(), 2_592_000_000L);
+    assertEquals(
+        MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(MONTHLY)
+            .longValue(), 0L);
     assertEquals(pinotTaskConfigs.size(), 2);
     Map<String, String> taskConfigsDaily3 = pinotTaskConfigs.get(0).getConfigs();
     Map<String, String> taskConfigsMonthly1 = pinotTaskConfigs.get(1).getConfigs();
-    checkPinotTaskConfig(taskConfigsDaily3, segmentNameMergedDaily3 + "," + segmentName5, "daily", "concat", "1d", null,
-        "1000000");
-    checkPinotTaskConfig(taskConfigsMonthly1, segmentNameMergedDaily1 + "," + segmentNameMergedDaily2, "monthly",
-        "rollup", "30d", "30d", "2000000");
+    checkPinotTaskConfig(taskConfigsDaily3, segmentNameMergedDaily3 + "," + segmentName5, "daily", "concat", "1d", null, "1000000");
+    checkPinotTaskConfig(taskConfigsMonthly1, segmentNameMergedDaily1 + "," + segmentNameMergedDaily2, "monthly", "rollup", "30d", "30d", "2000000");
 
     // Not scheduling for daily tasks if there are no unmerged segments
     // Not scheduling for monthly tasks if there are no 30 days merged daily segments
     String segmentNameMergedDaily4 = "merged_testTable__4_2__5";
-    SegmentZKMetadata metadataMergedDaily4 =
-        getSegmentZKMetadata(segmentNameMergedDaily4, 2_592_000_000L, 2_592_020_000L, TimeUnit.MILLISECONDS, null);
-    metadataMergedDaily4
-        .setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
+    SegmentZKMetadata metadataMergedDaily4 = getSegmentZKMetadata(segmentNameMergedDaily4, 2_592_000_000L, 2_592_020_000L, TimeUnit.MILLISECONDS, null);
+    metadataMergedDaily4.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
     String segmentNameMergedMonthly1 = "merged_testTable__1__2__3__4_1";
-    SegmentZKMetadata metadataMergedMonthly1 =
-        getSegmentZKMetadata(segmentNameMergedMonthly1, 86_400_000L, 2_591_999_999L, TimeUnit.MILLISECONDS, null);
-    metadataMergedMonthly1
-        .setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, MONTHLY));
+    SegmentZKMetadata metadataMergedMonthly1 = getSegmentZKMetadata(segmentNameMergedMonthly1, 86_400_000L, 2_591_999_999L, TimeUnit.MILLISECONDS, null);
+    metadataMergedMonthly1.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, MONTHLY));
     when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists
-        .newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1, metadataMergedDaily2,
-            metadataMergedDaily3, metadataMergedDaily4, metadataMergedMonthly1));
+        .newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1, metadataMergedDaily2, metadataMergedDaily3,
+            metadataMergedDaily4, metadataMergedMonthly1));
 
     segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
-        new LineageEntry(Arrays.asList(segmentNameMergedDaily3, segmentName5),
-            Collections.singletonList(segmentNameMergedDaily4), LineageEntryState.COMPLETED, 11111L));
+        new LineageEntry(Arrays.asList(segmentNameMergedDaily3, segmentName5), Collections.singletonList(segmentNameMergedDaily4), LineageEntryState.COMPLETED,
+            11111L));
     segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
-        new LineageEntry(Arrays.asList(segmentNameMergedDaily1, segmentNameMergedDaily2),
-            Collections.singletonList(segmentNameMergedMonthly1), LineageEntryState.COMPLETED, 11111L));
+        new LineageEntry(Arrays.asList(segmentNameMergedDaily1, segmentNameMergedDaily2), Collections.singletonList(segmentNameMergedMonthly1),
+            LineageEntryState.COMPLETED, 11111L));
 
     String taskName3 = "Task_MergeRollupTask_3";
     taskStatesMap.put(taskName3, TaskState.COMPLETED);
-    when(mockClusterInfoProvide.getTaskConfigs(taskName3)).thenReturn(
-        Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily3)));
+    when(mockClusterInfoProvide.getTaskConfigs(taskName3))
+        .thenReturn(Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily3)));
     String taskName4 = "Task_MergeRollupTask_4";
     taskStatesMap.put(taskName4, TaskState.COMPLETED);
-    when(mockClusterInfoProvide.getTaskConfigs(taskName4)).thenReturn(
-        Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsMonthly1)));
+    when(mockClusterInfoProvide.getTaskConfigs(taskName4))
+        .thenReturn(Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsMonthly1)));
 
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
 
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(DAILY).longValue(), 2_592_000_000L); // 30 days since epoch
-    assertEquals(MergeRollupTaskMetadata
-        .fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap()
-        .get(MONTHLY).longValue(), 0L);
+    assertEquals(MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY)
+        .longValue(), 2_592_000_000L); // 30 days since epoch
+    assertEquals(
+        MergeRollupTaskMetadata.fromZNRecord(mockClusterInfoProvide.getMinionMergeRollupTaskZNRecord(OFFLINE_TABLE_NAME)).getWatermarkMap().get(MONTHLY)
+            .longValue(), 0L);
     assertEquals(pinotTaskConfigs.size(), 0);
   }
 
-  private SegmentZKMetadata getSegmentZKMetadata(String segmentName, long startTime, long endTime, TimeUnit timeUnit,
-      String downloadURL) {
+  private SegmentZKMetadata getSegmentZKMetadata(String segmentName, long startTime, long endTime, TimeUnit timeUnit, String downloadURL) {
     SegmentZKMetadata segmentZKMetadata = new SegmentZKMetadata(segmentName);
     segmentZKMetadata.setSegmentType(SegmentType.OFFLINE);
     segmentZKMetadata.setStartTime(startTime);
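    As a reading aid for the MergeRollupTaskGeneratorTest hunks above, here is a minimal sketch of the pattern every case shares: stub ClusterInfoAccessor with segment ZK metadata, initialize MergeRollupTaskGenerator, and inspect the generated PinotTaskConfig entries. Class and method names are taken from the diff; the import paths, the fully populated TableConfig (carrying the "daily" MergeRollupTask settings shown earlier), and the end-time/time-unit setters (mirroring the test's private getSegmentZKMetadata helper) are assumptions rather than code visible in these hunks.
    
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;
    
    import com.google.common.collect.Lists;
    import java.util.List;
    import java.util.concurrent.TimeUnit;
    import org.apache.pinot.common.metadata.segment.SegmentZKMetadata;          // import paths assumed
    import org.apache.pinot.controller.helix.core.minion.ClusterInfoAccessor;   // import paths assumed
    import org.apache.pinot.core.minion.PinotTaskConfig;                        // import paths assumed
    import org.apache.pinot.plugin.minion.tasks.mergerollup.MergeRollupTaskGenerator;
    import org.apache.pinot.spi.config.table.TableConfig;
    
    public class MergeRollupGeneratorSketch {
    
      // offlineTableConfig is assumed to carry the "daily" MergeRollupTask settings shown in the hunks above.
      static List<PinotTaskConfig> generateDailyTasks(TableConfig offlineTableConfig) {
        // One unmerged segment starting 1 day since epoch, as in the tests above.
        SegmentZKMetadata metadata = new SegmentZKMetadata("testTable__1");
        metadata.setStartTime(86_400_000L);
        metadata.setEndTime(90_000_000L);              // setter names assumed from the getSegmentZKMetadata helper
        metadata.setTimeUnit(TimeUnit.MILLISECONDS);   // setter names assumed from the getSegmentZKMetadata helper
        metadata.setTotalDocs(2_000_000L);
    
        // Stub the accessor the generator reads segment metadata from.
        ClusterInfoAccessor accessor = mock(ClusterInfoAccessor.class);
        when(accessor.getSegmentsZKMetadata("testTable_OFFLINE")).thenReturn(Lists.newArrayList(metadata));
    
        // Generate tasks; the test's checkPinotTaskConfig helper then verifies MERGE_LEVEL_KEY,
        // MERGE_TYPE_KEY, the bucket time periods and MAX_NUM_RECORDS_PER_SEGMENT_KEY on each config.
        MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
        generator.init(accessor);
        return generator.generateTasks(Lists.newArrayList(offlineTableConfig));
      }
    }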
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskUtilsTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskUtilsTest.java
similarity index 97%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskUtilsTest.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskUtilsTest.java
index 864e57d..d8824de 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/merge_rollup/MergeRollupTaskUtilsTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/mergerollup/MergeRollupTaskUtilsTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.merge_rollup;
+package org.apache.pinot.plugin.minion.tasks.mergerollup;
 
 import java.util.HashMap;
 import java.util.Map;
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutorTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutorTest.java
index d260081..9a9f2af 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutorTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/purge/PurgeTaskExecutorTest.java
@@ -118,8 +118,8 @@ public class PurgeTaskExecutorTest {
   public void testConvert()
       throws Exception {
     PurgeTaskExecutor purgeTaskExecutor = new PurgeTaskExecutor();
-    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.PurgeTask.TASK_TYPE, Collections
-        .singletonMap(MinionConstants.TABLE_NAME_KEY, TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME)));
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.PurgeTask.TASK_TYPE,
+        Collections.singletonMap(MinionConstants.TABLE_NAME_KEY, TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME)));
     File purgedIndexDir = purgeTaskExecutor.convert(pinotTaskConfig, _originalIndexDir, PURGED_SEGMENT_DIR).getFile();
 
     try (PinotSegmentRecordReader pinotSegmentRecordReader = new PinotSegmentRecordReader(purgedIndexDir)) {
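    One detail worth calling out from the purge hunk: the task config's table name comes from TableNameBuilder.OFFLINE.tableNameWithType(...), which appends the table type to the raw name (the same testTable / testTable_OFFLINE convention used throughout these tests). A tiny sketch, with imports and the surrounding test scaffolding omitted:
    
    // Raw name -> type-qualified name, then the single-entry purge task config.
    String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType("testTable");  // "testTable_OFFLINE"
    PinotTaskConfig purgeConfig = new PinotTaskConfig(MinionConstants.PurgeTask.TASK_TYPE,
        Collections.singletonMap(MinionConstants.TABLE_NAME_KEY, offlineTableName));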
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutorTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutorTest.java
similarity index 78%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutorTest.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutorTest.java
index 558df37..351410d 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskExecutorTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskExecutorTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.realtime_to_offline_segments;
+package org.apache.pinot.plugin.minion.tasks.realtimetoofflinesegments;
 
 import com.google.common.collect.Lists;
 import java.io.File;
@@ -64,8 +64,7 @@ import static org.testng.Assert.assertTrue;
  * Tests for the {@link RealtimeToOfflineSegmentsTaskExecutor}
  */
 public class RealtimeToOfflineSegmentsTaskExecutorTest {
-  private static final File TEMP_DIR =
-      new File(FileUtils.getTempDirectory(), "RealtimeToOfflineSegmentTaskExecutorTest");
+  private static final File TEMP_DIR = new File(FileUtils.getTempDirectory(), "RealtimeToOfflineSegmentTaskExecutorTest");
   private static final File ORIGINAL_SEGMENT_DIR = new File(TEMP_DIR, "originalSegment");
   private static final File WORKING_DIR = new File(TEMP_DIR, "workingDir");
   private static final int NUM_SEGMENTS = 10;
@@ -88,36 +87,26 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
   public void setUp()
       throws Exception {
     FileUtils.deleteDirectory(TEMP_DIR);
-    TableConfig tableConfig =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(T).build();
+    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(T).build();
     Map<String, ColumnPartitionConfig> columnPartitionConfigMap = new HashMap<>();
     columnPartitionConfigMap.put(M1, new ColumnPartitionConfig("Modulo", 2));
-    TableConfig tableConfigWithPartitioning =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_WITH_PARTITIONING).setTimeColumnName(T)
-            .setSegmentPartitionConfig(new SegmentPartitionConfig(columnPartitionConfigMap)).build();
+    TableConfig tableConfigWithPartitioning = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_WITH_PARTITIONING).setTimeColumnName(T)
+        .setSegmentPartitionConfig(new SegmentPartitionConfig(columnPartitionConfigMap)).build();
     TableConfig tableConfigWithSortedCol =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_WITH_SORTED_COL).setTimeColumnName(T)
-            .setSortedColumn(D1).build();
+        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_WITH_SORTED_COL).setTimeColumnName(T).setSortedColumn(D1).build();
     TableConfig tableConfigEpochHours =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_EPOCH_HOURS).setTimeColumnName(T_TRX)
-            .setSortedColumn(D1).setIngestionConfig(
-            new IngestionConfig(null, null, null, Lists.newArrayList(new TransformConfig(T_TRX, "toEpochHours(t)")),
-                null)).build();
-    TableConfig tableConfigSDF =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_SDF).setTimeColumnName(T_TRX)
-            .setSortedColumn(D1).setIngestionConfig(new IngestionConfig(null, null, null,
-            Lists.newArrayList(new TransformConfig(T_TRX, "toDateTime(t, 'yyyyMMddHH')")), null)).build();
+        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_EPOCH_HOURS).setTimeColumnName(T_TRX).setSortedColumn(D1)
+            .setIngestionConfig(new IngestionConfig(null, null, null, Lists.newArrayList(new TransformConfig(T_TRX, "toEpochHours(t)")), null)).build();
+    TableConfig tableConfigSDF = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME_SDF).setTimeColumnName(T_TRX).setSortedColumn(D1)
+        .setIngestionConfig(new IngestionConfig(null, null, null, Lists.newArrayList(new TransformConfig(T_TRX, "toDateTime(t, 'yyyyMMddHH')")), null)).build();
     Schema schema =
-        new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension(D1, FieldSpec.DataType.STRING)
-            .addMetric(M1, FieldSpec.DataType.INT)
+        new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension(D1, FieldSpec.DataType.STRING).addMetric(M1, FieldSpec.DataType.INT)
             .addDateTime(T, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
     Schema schemaEpochHours =
-        new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension(D1, FieldSpec.DataType.STRING)
-            .addMetric(M1, FieldSpec.DataType.INT)
+        new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension(D1, FieldSpec.DataType.STRING).addMetric(M1, FieldSpec.DataType.INT)
             .addDateTime(T_TRX, FieldSpec.DataType.INT, "1:HOURS:EPOCH", "1:HOURS").build();
     Schema schemaSDF =
-        new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension(D1, FieldSpec.DataType.STRING)
-            .addMetric(M1, FieldSpec.DataType.INT)
+        new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension(D1, FieldSpec.DataType.STRING).addMetric(M1, FieldSpec.DataType.INT)
             .addDateTime(T_TRX, FieldSpec.DataType.INT, "1:HOURS:SIMPLE_DATE_FORMAT:yyyyMMddHH", "1:HOURS").build();
 
     List<String> d1 = Lists.newArrayList("foo", "bar", "foo", "foo", "bar");
@@ -185,10 +174,8 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
     MinionContext minionContext = MinionContext.getInstance();
     @SuppressWarnings("unchecked")
     ZkHelixPropertyStore<ZNRecord> helixPropertyStore = Mockito.mock(ZkHelixPropertyStore.class);
-    Mockito.when(helixPropertyStore.get("/CONFIGS/TABLE/" + TABLE_NAME, null, AccessOption.PERSISTENT))
-        .thenReturn(TableConfigUtils.toZNRecord(tableConfig));
-    Mockito
-        .when(helixPropertyStore.get("/CONFIGS/TABLE/" + TABLE_NAME_WITH_PARTITIONING, null, AccessOption.PERSISTENT))
+    Mockito.when(helixPropertyStore.get("/CONFIGS/TABLE/" + TABLE_NAME, null, AccessOption.PERSISTENT)).thenReturn(TableConfigUtils.toZNRecord(tableConfig));
+    Mockito.when(helixPropertyStore.get("/CONFIGS/TABLE/" + TABLE_NAME_WITH_PARTITIONING, null, AccessOption.PERSISTENT))
         .thenReturn(TableConfigUtils.toZNRecord(tableConfigWithPartitioning));
     Mockito.when(helixPropertyStore.get("/CONFIGS/TABLE/" + TABLE_NAME_WITH_SORTED_COL, null, AccessOption.PERSISTENT))
         .thenReturn(TableConfigUtils.toZNRecord(tableConfigWithSortedCol));
@@ -196,16 +183,11 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
         .thenReturn(TableConfigUtils.toZNRecord(tableConfigEpochHours));
     Mockito.when(helixPropertyStore.get("/CONFIGS/TABLE/" + TABLE_NAME_SDF, null, AccessOption.PERSISTENT))
         .thenReturn(TableConfigUtils.toZNRecord(tableConfigSDF));
-    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTable", null, AccessOption.PERSISTENT))
-        .thenReturn(SchemaUtils.toZNRecord(schema));
-    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableWithPartitioning", null, AccessOption.PERSISTENT))
-        .thenReturn(SchemaUtils.toZNRecord(schema));
-    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableWithSortedCol", null, AccessOption.PERSISTENT))
-        .thenReturn(SchemaUtils.toZNRecord(schema));
-    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableEpochHours", null, AccessOption.PERSISTENT))
-        .thenReturn(SchemaUtils.toZNRecord(schemaEpochHours));
-    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableSDF", null, AccessOption.PERSISTENT))
-        .thenReturn(SchemaUtils.toZNRecord(schemaSDF));
+    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTable", null, AccessOption.PERSISTENT)).thenReturn(SchemaUtils.toZNRecord(schema));
+    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableWithPartitioning", null, AccessOption.PERSISTENT)).thenReturn(SchemaUtils.toZNRecord(schema));
+    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableWithSortedCol", null, AccessOption.PERSISTENT)).thenReturn(SchemaUtils.toZNRecord(schema));
+    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableEpochHours", null, AccessOption.PERSISTENT)).thenReturn(SchemaUtils.toZNRecord(schemaEpochHours));
+    Mockito.when(helixPropertyStore.get("/SCHEMAS/testTableSDF", null, AccessOption.PERSISTENT)).thenReturn(SchemaUtils.toZNRecord(schemaSDF));
     minionContext.setHelixPropertyStore(helixPropertyStore);
   }
 
@@ -214,17 +196,14 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
       throws Exception {
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, "testTable_OFFLINE");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
-    List<SegmentConversionResult> conversionResults =
-        realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
 
     assertEquals(conversionResults.size(), 1);
     File resultingSegment = conversionResults.get(0).getFile();
@@ -241,18 +220,15 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
       throws Exception {
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME);
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
-    List<SegmentConversionResult> conversionResults =
-        realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
 
     assertEquals(conversionResults.size(), 1);
     File resultingSegment = conversionResults.get(0).getFile();
@@ -269,19 +245,16 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
       throws Exception {
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME);
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY, "1d");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
-    List<SegmentConversionResult> conversionResults =
-        realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
 
     assertEquals(conversionResults.size(), 1);
     File resultingSegment = conversionResults.get(0).getFile();
@@ -298,8 +271,7 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
 
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME);
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
@@ -307,11 +279,9 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY, "1d");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
     configs.put(M1 + MinionConstants.RealtimeToOfflineSegmentsTask.AGGREGATION_TYPE_KEY_SUFFIX, "max");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
-    List<SegmentConversionResult> conversionResults =
-        realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
 
     assertEquals(conversionResults.size(), 1);
     File resultingSegment = conversionResults.get(0).getFile();
@@ -331,17 +301,14 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
       throws Exception {
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME_WITH_PARTITIONING);
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600468000000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600617600000");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
-    List<SegmentConversionResult> conversionResults =
-        realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
 
     assertEquals(conversionResults.size(), 2);
     File resultingSegment = conversionResults.get(0).getFile();
@@ -363,18 +330,15 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
 
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME_WITH_SORTED_COL);
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
-    List<SegmentConversionResult> conversionResults =
-        realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
 
     assertEquals(conversionResults.size(), 1);
     File resultingSegment = conversionResults.get(0).getFile();
@@ -391,15 +355,13 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
 
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME_EPOCH_HOURS);
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
     List<SegmentConversionResult> conversionResults =
         realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirListEpochHours, WORKING_DIR);
@@ -420,18 +382,15 @@ public class RealtimeToOfflineSegmentsTaskExecutorTest {
 
     FileUtils.deleteQuietly(WORKING_DIR);
 
-    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
-        new RealtimeToOfflineSegmentsTaskExecutor(null);
+    RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor = new RealtimeToOfflineSegmentsTaskExecutor(null);
     Map<String, String> configs = new HashMap<>();
     configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME_SDF);
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
     configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
-    PinotTaskConfig pinotTaskConfig =
-        new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
+    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
 
-    List<SegmentConversionResult> conversionResults =
-        realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirListSDF, WORKING_DIR);
+    List<SegmentConversionResult> conversionResults = realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirListSDF, WORKING_DIR);
 
     assertEquals(conversionResults.size(), 1);
     File resultingSegment = conversionResults.get(0).getFile();
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskGeneratorTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskGeneratorTest.java
similarity index 80%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskGeneratorTest.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskGeneratorTest.java
index 933dfbb..01577fa 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtime_to_offline_segments/RealtimeToOfflineSegmentsTaskGeneratorTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/realtimetoofflinesegments/RealtimeToOfflineSegmentsTaskGeneratorTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.realtime_to_offline_segments;
+package org.apache.pinot.plugin.minion.tasks.realtimetoofflinesegments;
 
 import com.google.common.collect.Lists;
 import java.util.HashMap;
@@ -55,24 +55,20 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
   private static final String RAW_TABLE_NAME = "testTable";
   private static final String REALTIME_TABLE_NAME = "testTable_REALTIME";
   private static final String TIME_COLUMN_NAME = "millisSinceEpoch";
-  private final Map<String, String> streamConfigs = new HashMap<>();
+  private final Map<String, String> _streamConfigs = new HashMap<>();
 
   @BeforeClass
   public void setup() {
-    streamConfigs.put(StreamConfigProperties.STREAM_TYPE, "kafka");
-    streamConfigs
-        .put(StreamConfigProperties.constructStreamProperty("kafka", StreamConfigProperties.STREAM_CONSUMER_TYPES),
-            StreamConfig.ConsumerType.LOWLEVEL.toString());
-    streamConfigs.put(StreamConfigProperties.constructStreamProperty("kafka", StreamConfigProperties.STREAM_TOPIC_NAME),
-        "myTopic");
-    streamConfigs
-        .put(StreamConfigProperties.constructStreamProperty("kafka", StreamConfigProperties.STREAM_DECODER_CLASS),
-            "org.foo.Decoder");
+    _streamConfigs.put(StreamConfigProperties.STREAM_TYPE, "kafka");
+    _streamConfigs.put(StreamConfigProperties.constructStreamProperty("kafka", StreamConfigProperties.STREAM_CONSUMER_TYPES),
+        StreamConfig.ConsumerType.LOWLEVEL.toString());
+    _streamConfigs.put(StreamConfigProperties.constructStreamProperty("kafka", StreamConfigProperties.STREAM_TOPIC_NAME), "myTopic");
+    _streamConfigs.put(StreamConfigProperties.constructStreamProperty("kafka", StreamConfigProperties.STREAM_DECODER_CLASS), "org.foo.Decoder");
   }
 
   private TableConfig getRealtimeTableConfig(Map<String, Map<String, String>> taskConfigsMap) {
-    return new TableConfigBuilder(TableType.REALTIME).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
-        .setStreamConfigs(streamConfigs).setTaskConfig(new TableTaskConfig(taskConfigsMap)).build();
+    return new TableConfigBuilder(TableType.REALTIME).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME).setStreamConfigs(_streamConfigs)
+        .setTaskConfig(new TableTaskConfig(taskConfigsMap)).build();
   }
 
   /**
@@ -83,10 +79,8 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
 
     when(mockClusterInfoProvide.getTaskStates(RealtimeToOfflineSegmentsTask.TASK_TYPE)).thenReturn(new HashMap<>());
-    SegmentZKMetadata segmentZKMetadata =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 5000, 50_000, TimeUnit.MILLISECONDS, null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata));
+    SegmentZKMetadata segmentZKMetadata = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 5000, 50_000, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata));
 
     RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator();
     generator.init(mockClusterInfoProvide);
@@ -135,11 +129,8 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
         .thenReturn(Lists.newArrayList(new PinotTaskConfig(RealtimeToOfflineSegmentsTask.TASK_TYPE, taskConfigs)));
     when(mockClusterInfoProvide.getMinionRealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME))
         .thenReturn(new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, 100_000L));
-    SegmentZKMetadata segmentZKMetadata =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 80_000_000, 90_000_000, TimeUnit.MILLISECONDS,
-            null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata));
+    SegmentZKMetadata segmentZKMetadata = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 80_000_000, 90_000_000, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata));
 
     RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator();
     generator.init(mockClusterInfoProvide);
@@ -155,8 +146,7 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     assertEquals(pinotTaskConfigs.size(), 1);
 
     // if same task and table, IN_PROGRESS, but older than 1 day, generate
-    String oldTaskName =
-        "Task_RealtimeToOfflineSegmentsTask_" + (System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3));
+    String oldTaskName = "Task_RealtimeToOfflineSegmentsTask_" + (System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3));
     taskStatesMap.remove(taskName);
     taskStatesMap.put(oldTaskName, TaskState.IN_PROGRESS);
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
@@ -183,10 +173,8 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     assertTrue(pinotTaskConfigs.isEmpty());
 
     // No COMPLETED segments in table
-    SegmentZKMetadata segmentZKMetadata1 =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata1));
+    SegmentZKMetadata segmentZKMetadata1 = getSegmentZKMetadata("testTable__0__0__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata1));
 
     generator = new RealtimeToOfflineSegmentsTaskGenerator();
     generator.init(mockClusterInfoProvide);
@@ -194,10 +182,8 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     assertTrue(pinotTaskConfigs.isEmpty());
 
     // 2 partitions. No COMPLETED segments for partition 0
-    SegmentZKMetadata segmentZKMetadata2 =
-        getSegmentZKMetadata("testTable__1__0__12345", Status.DONE, 5000, 10000, TimeUnit.MILLISECONDS, null);
-    SegmentZKMetadata segmentZKMetadata3 =
-        getSegmentZKMetadata("testTable__1__1__13456", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata segmentZKMetadata2 = getSegmentZKMetadata("testTable__1__0__12345", Status.DONE, 5000, 10000, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata segmentZKMetadata3 = getSegmentZKMetadata("testTable__1__1__13456", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
     when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
         .thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2, segmentZKMetadata3));
 
@@ -215,14 +201,11 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
     when(mockClusterInfoProvide.getTaskStates(RealtimeToOfflineSegmentsTask.TASK_TYPE)).thenReturn(new HashMap<>());
     when(mockClusterInfoProvide.getMinionRealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME)).thenReturn(null);
-    SegmentZKMetadata segmentZKMetadata1 =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 1590048000000L, 1590134400000L,
-            TimeUnit.MILLISECONDS, "download1"); // 21 May 2020 8am to 22 May 2020 8am UTC
-    SegmentZKMetadata segmentZKMetadata2 =
-        getSegmentZKMetadata("testTable__1__0__12345", Status.DONE, 1590048000000L, 1590134400000L,
-            TimeUnit.MILLISECONDS, "download2"); // 21 May 2020 8am to 22 May 2020 8am UTC
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
+    SegmentZKMetadata segmentZKMetadata1 = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 1590048000000L, 1590134400000L, TimeUnit.MILLISECONDS,
+        "download1"); // 21 May 2020 8am to 22 May 2020 8am UTC
+    SegmentZKMetadata segmentZKMetadata2 = getSegmentZKMetadata("testTable__1__0__12345", Status.DONE, 1590048000000L, 1590134400000L, TimeUnit.MILLISECONDS,
+        "download2"); // 21 May 2020 8am to 22 May 2020 8am UTC
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
 
     // StartTime calculated using segment metadata
     Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
@@ -242,12 +225,11 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY), "1590105600000"); // 22 May 2020 UTC
 
     // Segment metadata in hoursSinceEpoch
-    segmentZKMetadata1 = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 441680L, 441703L, TimeUnit.HOURS,
-        "download1"); // 21 May 2020 8am to 22 May 2020 8am UTC
-    segmentZKMetadata2 = getSegmentZKMetadata("testTable__1__0__12345", Status.DONE, 441680L, 441703L, TimeUnit.HOURS,
-        "download2"); // 21 May 2020 8am to 22 May 2020 8am UTC
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
+    segmentZKMetadata1 =
+        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 441680L, 441703L, TimeUnit.HOURS, "download1"); // 21 May 2020 8am to 22 May 2020 8am UTC
+    segmentZKMetadata2 =
+        getSegmentZKMetadata("testTable__1__0__12345", Status.DONE, 441680L, 441703L, TimeUnit.HOURS, "download2"); // 21 May 2020 8am to 22 May 2020 8am UTC
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
     generator = new RealtimeToOfflineSegmentsTaskGenerator();
     generator.init(mockClusterInfoProvide);
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
@@ -270,14 +252,11 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     when(mockClusterInfoProvide.getTaskStates(RealtimeToOfflineSegmentsTask.TASK_TYPE)).thenReturn(new HashMap<>());
     when(mockClusterInfoProvide.getMinionRealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME))
         .thenReturn(new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, 1590019200000L)); // 21 May 2020 UTC
-    SegmentZKMetadata segmentZKMetadata1 =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 1589972400000L, 1590048000000L,
-            TimeUnit.MILLISECONDS, "download1"); // 05-20-2020T11:00:00 to 05-21-2020T08:00:00 UTC
-    SegmentZKMetadata segmentZKMetadata2 =
-        getSegmentZKMetadata("testTable__0__1__12345", Status.DONE, 1590048000000L, 1590134400000L,
-            TimeUnit.MILLISECONDS, "download2"); // 05-21-2020T08:00:00 UTC to 05-22-2020T08:00:00 UTC
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
+    SegmentZKMetadata segmentZKMetadata1 = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 1589972400000L, 1590048000000L, TimeUnit.MILLISECONDS,
+        "download1"); // 05-20-2020T11:00:00 to 05-21-2020T08:00:00 UTC
+    SegmentZKMetadata segmentZKMetadata2 = getSegmentZKMetadata("testTable__0__1__12345", Status.DONE, 1590048000000L, 1590134400000L, TimeUnit.MILLISECONDS,
+        "download2"); // 05-21-2020T08:00:00 UTC to 05-22-2020T08:00:00 UTC
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
 
     // Default configs
     Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
@@ -319,8 +298,7 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     assertEquals(configs.get(MinionConstants.TABLE_NAME_KEY), REALTIME_TABLE_NAME);
     assertEquals(configs.get(MinionConstants.SEGMENT_NAME_KEY), "testTable__0__0__12345");
     assertEquals(configs.get(MinionConstants.DOWNLOAD_URL_KEY), "download1");
-    assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY),
-        "1590019200000"); // 05-21-2020T00:00:00
+    assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY), "1590019200000"); // 05-21-2020T00:00:00
     assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY), "1590026400000"); // 05-21-2020T02:00:00
 
     // Segment Processor configs
@@ -340,8 +318,7 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     assertEquals(configs.get(MinionConstants.TABLE_NAME_KEY), REALTIME_TABLE_NAME);
     assertEquals(configs.get(MinionConstants.SEGMENT_NAME_KEY), "testTable__0__0__12345,testTable__0__1__12345");
     assertEquals(configs.get(MinionConstants.DOWNLOAD_URL_KEY), "download1,download2");
-    assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY),
-        "1590019200000"); // 05-21-2020T00:00:00
+    assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY), "1590019200000"); // 05-21-2020T00:00:00
     assertEquals(configs.get(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY), "1590105600000"); // 05-22-2020T00:00:00
     assertEquals(configs.get(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY), "1h");
     assertEquals(configs.get(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY), "rollup");
@@ -362,12 +339,9 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
 
     when(mockClusterInfoProvide.getMinionRealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME))
         .thenReturn(new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, 100_000L));
-    SegmentZKMetadata segmentZKMetadata1 =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 50_000, 150_000, TimeUnit.MILLISECONDS, null);
-    SegmentZKMetadata segmentZKMetadata2 =
-        getSegmentZKMetadata("testTable__0__1__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
+    SegmentZKMetadata segmentZKMetadata1 = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 50_000, 150_000, TimeUnit.MILLISECONDS, null);
+    SegmentZKMetadata segmentZKMetadata2 = getSegmentZKMetadata("testTable__0__1__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
 
     RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator();
     generator.init(mockClusterInfoProvide);
@@ -376,22 +350,16 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
     assertTrue(pinotTaskConfigs.isEmpty());
 
-    segmentZKMetadata1 =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 100_000, 200_000, TimeUnit.MILLISECONDS, null);
-    segmentZKMetadata2 =
-        getSegmentZKMetadata("testTable__0__1__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
+    segmentZKMetadata1 = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 100_000, 200_000, TimeUnit.MILLISECONDS, null);
+    segmentZKMetadata2 = getSegmentZKMetadata("testTable__0__1__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
     assertTrue(pinotTaskConfigs.isEmpty());
 
     // last completed segment endtime ends at window end, allow
-    segmentZKMetadata1 =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 200_000, 86_500_000, TimeUnit.MILLISECONDS, null);
-    segmentZKMetadata2 =
-        getSegmentZKMetadata("testTable__0__1__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
+    segmentZKMetadata1 = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, 200_000, 86_500_000, TimeUnit.MILLISECONDS, null);
+    segmentZKMetadata2 = getSegmentZKMetadata("testTable__0__1__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2));
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
     assertEquals(pinotTaskConfigs.size(), 1);
   }
@@ -410,10 +378,8 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     when(mockClusterInfoProvide.getMinionRealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME))
         .thenReturn(new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, watermarkMs));
     SegmentZKMetadata segmentZKMetadata =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, watermarkMs - 100, watermarkMs + 100,
-            TimeUnit.MILLISECONDS, null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata));
+        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, watermarkMs - 100, watermarkMs + 100, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata));
 
     RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator();
     generator.init(mockClusterInfoProvide);
@@ -430,18 +396,14 @@ public class RealtimeToOfflineSegmentsTaskGeneratorTest {
     watermarkMs = now - TimeUnit.DAYS.toMillis(10);
     when(mockClusterInfoProvide.getMinionRealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME))
         .thenReturn(new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, watermarkMs));
-    segmentZKMetadata =
-        getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, watermarkMs - 100, watermarkMs + 100,
-            TimeUnit.MILLISECONDS, null);
-    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
-        .thenReturn(Lists.newArrayList(segmentZKMetadata));
+    segmentZKMetadata = getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, watermarkMs - 100, watermarkMs + 100, TimeUnit.MILLISECONDS, null);
+    when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList(segmentZKMetadata));
 
     pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
     assertTrue(pinotTaskConfigs.isEmpty());
   }
 
-  private SegmentZKMetadata getSegmentZKMetadata(String segmentName, Status status, long startTime, long endTime,
-      TimeUnit timeUnit, String downloadURL) {
+  private SegmentZKMetadata getSegmentZKMetadata(String segmentName, Status status, long startTime, long endTime, TimeUnit timeUnit, String downloadURL) {
     SegmentZKMetadata realtimeSegmentZKMetadata = new SegmentZKMetadata(segmentName);
     realtimeSegmentZKMetadata.setStatus(status);
     realtimeSegmentZKMetadata.setStartTime(startTime);
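
Aside, not part of the commit: many of the hunks in this test file only rename fields to satisfy the checkstyle convention this change turns on, e.g. `streamConfigs` becomes `_streamConfigs`. For reference, a minimal sketch of that convention on a hypothetical class (illustrative only, not a Pinot API): instance fields take a leading underscore and constants use UPPER_SNAKE_CASE, which also removes the need for `this.`-qualified assignments.

    import java.util.HashMap;
    import java.util.Map;

    public class StreamConfigHolder {
      // Constants use UPPER_SNAKE_CASE.
      private static final String DEFAULT_STREAM_TYPE = "kafka";

      // Instance fields take a leading underscore, so assignments need no "this." prefix.
      private final Map<String, String> _streamConfigs = new HashMap<>();

      public StreamConfigHolder(String topicName) {
        _streamConfigs.put("streamType", DEFAULT_STREAM_TYPE);
        _streamConfigs.put("stream.kafka.topic.name", topicName);
      }

      public Map<String, String> getStreamConfigs() {
        return _streamConfigs;
      }
    }
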
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskGeneratorTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskGeneratorTest.java
similarity index 86%
rename from pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskGeneratorTest.java
rename to pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskGeneratorTest.java
index 5fba979..5655dbc 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/segment_generation_and_push/SegmentGenerationAndPushTaskGeneratorTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/segmentgenerationandpush/SegmentGenerationAndPushTaskGeneratorTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.plugin.minion.tasks.segment_generation_and_push;
+package org.apache.pinot.plugin.minion.tasks.segmentgenerationandpush;
 
 import java.util.Collections;
 import java.util.Map;
@@ -66,15 +66,14 @@ public class SegmentGenerationAndPushTaskGeneratorTest extends ControllerTest {
     Assert.assertEquals(_generator.getNumConcurrentTasksPerInstance(), 1);
 
     // Set config to 5
-    String request = JsonUtils.objectToString(Collections
-        .singletonMap(MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE, "5"));
+    String request =
+        JsonUtils.objectToString(Collections.singletonMap(MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE, "5"));
     ControllerTest.sendPostRequest(_controllerRequestURLBuilder.forClusterConfigs(), request);
     Assert.assertEquals(_generator.getNumConcurrentTasksPerInstance(), 5);
 
     // Set config to invalid and should still get 1
-    request = JsonUtils.objectToString(Collections
-        .singletonMap(MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE,
-            "abcd"));
+    request =
+        JsonUtils.objectToString(Collections.singletonMap(MinionConstants.SegmentGenerationAndPushTask.CONFIG_NUMBER_CONCURRENT_TASKS_PER_INSTANCE, "abcd"));
     ControllerTest.sendPostRequest(_controllerRequestURLBuilder.forClusterConfigs(), request);
     Assert.assertEquals(_generator.getNumConcurrentTasksPerInstance(), 1);
   }
diff --git a/pinot-plugins/pinot-segment-uploader/pom.xml b/pinot-plugins/pinot-segment-uploader/pom.xml
index 8dc644d..6fb7859 100644
--- a/pinot-plugins/pinot-segment-uploader/pom.xml
+++ b/pinot-plugins/pinot-segment-uploader/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../..</pinot.root>
     <plugin.type>pinot-segment-uploader</plugin.type>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <modules>
diff --git a/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/pom.xml b/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/pom.xml
index a740f9c..d886c1a 100644
--- a/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/pom.xml
+++ b/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>none</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencies>
diff --git a/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/main/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriter.java b/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/main/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriter.java
index 58108e7..bdc82f7 100644
--- a/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/main/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriter.java
+++ b/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/main/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriter.java
@@ -86,19 +86,16 @@ public class FileBasedSegmentWriter implements SegmentWriter {
     _tableConfig = tableConfig;
     _tableNameWithType = _tableConfig.getTableName();
 
-    Preconditions.checkState(
-        _tableConfig.getIngestionConfig() != null && _tableConfig.getIngestionConfig().getBatchIngestionConfig() != null
-            && CollectionUtils
+    Preconditions.checkState(_tableConfig.getIngestionConfig() != null && _tableConfig.getIngestionConfig().getBatchIngestionConfig() != null && CollectionUtils
             .isNotEmpty(_tableConfig.getIngestionConfig().getBatchIngestionConfig().getBatchConfigMaps()),
-        "Must provide ingestionConfig->batchIngestionConfig->batchConfigMaps in tableConfig for table: %s",
-        _tableNameWithType);
+        "Must provide ingestionConfig->batchIngestionConfig->batchConfigMaps in tableConfig for table: %s", _tableNameWithType);
     _batchIngestionConfig = _tableConfig.getIngestionConfig().getBatchIngestionConfig();
-    Preconditions.checkState(_batchIngestionConfig.getBatchConfigMaps().size() == 1,
-        "batchConfigMaps must contain only 1 BatchConfig for table: %s", _tableNameWithType);
+    Preconditions.checkState(_batchIngestionConfig.getBatchConfigMaps().size() == 1, "batchConfigMaps must contain only 1 BatchConfig for table: %s",
+        _tableNameWithType);
     _batchConfig = new BatchConfig(_tableNameWithType, _batchIngestionConfig.getBatchConfigMaps().get(0));
 
-    Preconditions.checkState(StringUtils.isNotBlank(_batchConfig.getOutputDirURI()),
-        "Must provide: %s in batchConfigs for table: %s", BatchConfigProperties.OUTPUT_DIR_URI, _tableNameWithType);
+    Preconditions.checkState(StringUtils.isNotBlank(_batchConfig.getOutputDirURI()), "Must provide: %s in batchConfigs for table: %s",
+        BatchConfigProperties.OUTPUT_DIR_URI, _tableNameWithType);
     _outputDirURI = _batchConfig.getOutputDirURI();
     Files.createDirectories(Paths.get(_outputDirURI));
 
@@ -109,8 +106,7 @@ public class FileBasedSegmentWriter implements SegmentWriter {
     _reusableRecord = new GenericData.Record(_avroSchema);
 
     // Create tmp dir
-    _stagingDir = new File(FileUtils.getTempDirectory(),
-        String.format("segment_writer_staging_%s_%d", _tableNameWithType, System.currentTimeMillis()));
+    _stagingDir = new File(FileUtils.getTempDirectory(), String.format("segment_writer_staging_%s_%d", _tableNameWithType, System.currentTimeMillis()));
     Preconditions.checkState(_stagingDir.mkdirs(), "Failed to create staging dir: %s", _stagingDir.getAbsolutePath());
 
     // Create buffer file
@@ -173,32 +169,29 @@ public class FileBasedSegmentWriter implements SegmentWriter {
       batchConfigMapOverride.put(BatchConfigProperties.INPUT_DIR_URI, _bufferFile.getAbsolutePath());
       batchConfigMapOverride.put(BatchConfigProperties.OUTPUT_DIR_URI, segmentDir.getAbsolutePath());
       batchConfigMapOverride.put(BatchConfigProperties.INPUT_FORMAT, BUFFER_FILE_FORMAT.toString());
-      BatchIngestionConfig batchIngestionConfig = new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride),
-          _batchIngestionConfig.getSegmentIngestionType(), _batchIngestionConfig.getSegmentIngestionFrequency());
+      BatchIngestionConfig batchIngestionConfig =
+          new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride), _batchIngestionConfig.getSegmentIngestionType(),
+              _batchIngestionConfig.getSegmentIngestionFrequency());
 
       // Build segment
-      SegmentGeneratorConfig segmentGeneratorConfig =
-          IngestionUtils.generateSegmentGeneratorConfig(_tableConfig, _schema, batchIngestionConfig);
+      SegmentGeneratorConfig segmentGeneratorConfig = IngestionUtils.generateSegmentGeneratorConfig(_tableConfig, _schema, batchIngestionConfig);
       String segmentName = IngestionUtils.buildSegment(segmentGeneratorConfig);
       LOGGER.info("Successfully built segment: {} for table: {}", segmentName, _tableNameWithType);
 
       // Tar segment
       File segmentTarFile = new File(_outputDirURI, segmentName + Constants.TAR_GZ_FILE_EXT);
       if (!_batchConfig.isOverwriteOutput() && segmentTarFile.exists()) {
-        segmentTarFile = new File(_outputDirURI,
-            String.format("%s_%d%s", segmentName, System.currentTimeMillis(), Constants.TAR_GZ_FILE_EXT));
+        segmentTarFile = new File(_outputDirURI, String.format("%s_%d%s", segmentName, System.currentTimeMillis(), Constants.TAR_GZ_FILE_EXT));
       }
       TarGzCompressionUtils.createTarGzFile(new File(segmentDir, segmentName), segmentTarFile);
-      LOGGER.info("Created segment tar: {} for segment: {} of table: {}", segmentTarFile.getAbsolutePath(), segmentName,
-          _tableNameWithType);
+      LOGGER.info("Created segment tar: {} for segment: {} of table: {}", segmentTarFile.getAbsolutePath(), segmentName, _tableNameWithType);
 
       // Reset buffer and return segmentTar URI
       resetBuffer();
       return segmentTarFile.toURI();
     } catch (Exception e) {
-      throw new RuntimeException(String
-          .format("Caught exception while generating segment from buffer file: %s for table:%s",
-              _bufferFile.getAbsolutePath(), _tableNameWithType), e);
+      throw new RuntimeException(
+          String.format("Caught exception while generating segment from buffer file: %s for table:%s", _bufferFile.getAbsolutePath(), _tableNameWithType), e);
     } finally {
       FileUtils.deleteQuietly(flushDir);
     }
diff --git a/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/test/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriterTest.java b/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/test/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriterTest.java
index 7aaae70..4c34927 100644
--- a/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/test/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriterTest.java
+++ b/pinot-plugins/pinot-segment-writer/pinot-segment-writer-file-based/src/test/java/org/apache/pinot/plugin/segmentwriter/filebased/FileBasedSegmentWriterTest.java
@@ -75,21 +75,14 @@ public class FileBasedSegmentWriterTest {
     Map<String, String> batchConfigMap = new HashMap<>();
     batchConfigMap.put(BatchConfigProperties.OUTPUT_DIR_URI, _outputDir.getAbsolutePath());
     _ingestionConfig =
-        new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(batchConfigMap), "APPEND", "HOURLY"), null,
-            null, transformConfigs, null);
+        new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(batchConfigMap), "APPEND", "HOURLY"), null, null, transformConfigs, null);
     _tableConfig =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIngestionConfig(_ingestionConfig)
-            .setTimeColumnName(TIME_COLUMN_NAME).build();
-    _schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
-        .addSingleValueDimension("aString", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("aSimpleMap_str", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("anAdvancedMap_str", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("nullString", FieldSpec.DataType.STRING)
-        .addSingleValueDimension("aBoolean", FieldSpec.DataType.BOOLEAN)
-        .addSingleValueDimension("aBytes", FieldSpec.DataType.BYTES)
-        .addMultiValueDimension("aStringList", FieldSpec.DataType.STRING)
-        .addMultiValueDimension("anIntList", FieldSpec.DataType.INT)
-        .addMultiValueDimension("aStringArray", FieldSpec.DataType.STRING)
+        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIngestionConfig(_ingestionConfig).setTimeColumnName(TIME_COLUMN_NAME).build();
+    _schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension("aString", FieldSpec.DataType.STRING)
+        .addSingleValueDimension("aSimpleMap_str", FieldSpec.DataType.STRING).addSingleValueDimension("anAdvancedMap_str", FieldSpec.DataType.STRING)
+        .addSingleValueDimension("nullString", FieldSpec.DataType.STRING).addSingleValueDimension("aBoolean", FieldSpec.DataType.BOOLEAN)
+        .addSingleValueDimension("aBytes", FieldSpec.DataType.BYTES).addMultiValueDimension("aStringList", FieldSpec.DataType.STRING)
+        .addMultiValueDimension("anIntList", FieldSpec.DataType.INT).addMultiValueDimension("aStringArray", FieldSpec.DataType.STRING)
         .addMultiValueDimension("aDoubleArray", FieldSpec.DataType.DOUBLE).addMetric("anInt", FieldSpec.DataType.INT)
         .addMetric("aFloat", FieldSpec.DataType.FLOAT).addMetric("aDouble", FieldSpec.DataType.DOUBLE)
         .addDateTime(TIME_COLUMN_NAME, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
@@ -103,8 +96,7 @@ public class FileBasedSegmentWriterTest {
       throws Exception {
 
     SegmentWriter segmentWriter = new FileBasedSegmentWriter();
-    TableConfig tableConfig =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME).build();
+    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME).build();
     try {
       segmentWriter.init(tableConfig, _schema);
       Assert.fail("Should fail due to missing ingestionConfig");
@@ -120,8 +112,7 @@ public class FileBasedSegmentWriterTest {
       // expected
     }
 
-    tableConfig
-        .setIngestionConfig(new IngestionConfig(new BatchIngestionConfig(null, "APPEND", "HOURLY"), null, null, null, null));
+    tableConfig.setIngestionConfig(new IngestionConfig(new BatchIngestionConfig(null, "APPEND", "HOURLY"), null, null, null, null));
     try {
       segmentWriter.init(tableConfig, _schema);
       Assert.fail("Should fail due to missing batchConfigMaps");
@@ -129,8 +120,7 @@ public class FileBasedSegmentWriterTest {
       // expected
     }
 
-    tableConfig.setIngestionConfig(
-        new IngestionConfig(new BatchIngestionConfig(Collections.emptyList(), "APPEND", "HOURLY"), null, null, null, null));
+    tableConfig.setIngestionConfig(new IngestionConfig(new BatchIngestionConfig(Collections.emptyList(), "APPEND", "HOURLY"), null, null, null, null));
     try {
       segmentWriter.init(tableConfig, _schema);
       Assert.fail("Should fail due to missing batchConfigMaps");
@@ -139,8 +129,7 @@ public class FileBasedSegmentWriterTest {
     }
 
     tableConfig.setIngestionConfig(
-        new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(Collections.emptyMap()), "APPEND", "HOURLY"),
-            null, null, null, null));
+        new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(Collections.emptyMap()), "APPEND", "HOURLY"), null, null, null, null));
     try {
       segmentWriter.init(tableConfig, _schema);
       Assert.fail("Should fail due to missing outputDirURI in batchConfigMap");
@@ -150,9 +139,8 @@ public class FileBasedSegmentWriterTest {
 
     Map<String, String> batchConfigMap = new HashMap<>();
     batchConfigMap.put(BatchConfigProperties.OUTPUT_DIR_URI, _outputDir.getAbsolutePath());
-    tableConfig.setIngestionConfig(
-        new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(batchConfigMap), "APPEND", "HOURLY"), null,
-            null, null, null));
+    tableConfig
+        .setIngestionConfig(new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(batchConfigMap), "APPEND", "HOURLY"), null, null, null, null));
     segmentWriter.init(tableConfig, _schema);
     segmentWriter.close();
   }
@@ -177,8 +165,7 @@ public class FileBasedSegmentWriterTest {
     File segmentTar = new File(_outputDir, "segmentWriter_1616238000000_1616241600000.tar.gz");
     Assert.assertTrue(segmentTar.exists());
     TarGzCompressionUtils.untar(segmentTar, _outputDir);
-    SegmentMetadataImpl segmentMetadata =
-        new SegmentMetadataImpl(new File(_outputDir, "segmentWriter_1616238000000_1616241600000"));
+    SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(new File(_outputDir, "segmentWriter_1616238000000_1616241600000"));
     Assert.assertEquals(segmentMetadata.getTotalDocs(), 3);
     Assert.assertEquals(segmentMetadata.getColumnMetadataFor("aString").getCardinality(), 3);
     Assert.assertEquals(segmentMetadata.getColumnMetadataFor("aLong").getCardinality(), 2);
@@ -221,8 +208,7 @@ public class FileBasedSegmentWriterTest {
     Assert.assertEquals(files.length, 1);
     File segmentTar = files[0];
     TarGzCompressionUtils.untar(segmentTar, _outputDir);
-    SegmentMetadataImpl segmentMetadata =
-        new SegmentMetadataImpl(new File(_outputDir, files[0].getName().split(Constants.TAR_GZ_FILE_EXT)[0]));
+    SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(new File(_outputDir, files[0].getName().split(Constants.TAR_GZ_FILE_EXT)[0]));
     Assert.assertEquals(segmentMetadata.getTotalDocs(), 0);
     Assert.assertEquals(segmentMetadata.getColumnMetadataFor("aString").getCardinality(), 0);
     Assert.assertEquals(segmentMetadata.getColumnMetadataFor("aLong").getCardinality(), 0);
@@ -243,17 +229,13 @@ public class FileBasedSegmentWriterTest {
     // FIXED segment name
     Map<String, String> batchConfigMap = _ingestionConfig.getBatchIngestionConfig().getBatchConfigMaps().get(0);
     Map<String, String> batchConfigMapOverride = new HashMap<>(batchConfigMap);
+    batchConfigMapOverride.put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE, BatchConfigProperties.SegmentNameGeneratorType.FIXED);
     batchConfigMapOverride
-        .put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE, BatchConfigProperties.SegmentNameGeneratorType.FIXED);
-    batchConfigMapOverride.put(String
-            .format("%s.%s", BatchConfigProperties.SEGMENT_NAME_GENERATOR_PROP_PREFIX, BatchConfigProperties.SEGMENT_NAME),
-        "customSegmentName");
-    TableConfig tableConfig =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
-            .setIngestionConfig(new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride),
-                _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
-                _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null,
-                _ingestionConfig.getTransformConfigs(), null)).build();
+        .put(String.format("%s.%s", BatchConfigProperties.SEGMENT_NAME_GENERATOR_PROP_PREFIX, BatchConfigProperties.SEGMENT_NAME), "customSegmentName");
+    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME).setIngestionConfig(
+        new IngestionConfig(
+            new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride), _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
+                _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null, _ingestionConfig.getTransformConfigs(), null)).build();
 
     SegmentWriter segmentWriter = new FileBasedSegmentWriter();
     segmentWriter.init(tableConfig, _schema);
@@ -273,13 +255,10 @@ public class FileBasedSegmentWriterTest {
 
     // NORMALIZED segment name
     batchConfigMapOverride = new HashMap<>(batchConfigMap);
-    batchConfigMapOverride.put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE,
-        BatchConfigProperties.SegmentNameGeneratorType.NORMALIZED_DATE);
+    batchConfigMapOverride.put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE, BatchConfigProperties.SegmentNameGeneratorType.NORMALIZED_DATE);
     tableConfig.setIngestionConfig(new IngestionConfig(
-        new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride),
-            _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
-            _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null,
-        _ingestionConfig.getTransformConfigs(), null));
+        new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride), _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
+            _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null, _ingestionConfig.getTransformConfigs(), null));
     segmentWriter.init(tableConfig, _schema);
 
     // write 2 records
@@ -296,14 +275,11 @@ public class FileBasedSegmentWriterTest {
 
     // SIMPLE segment name w/ sequenceId
     batchConfigMapOverride = new HashMap<>(batchConfigMap);
-    batchConfigMapOverride
-        .put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE, BatchConfigProperties.SegmentNameGeneratorType.SIMPLE);
+    batchConfigMapOverride.put(BatchConfigProperties.SEGMENT_NAME_GENERATOR_TYPE, BatchConfigProperties.SegmentNameGeneratorType.SIMPLE);
     batchConfigMapOverride.put(BatchConfigProperties.SEQUENCE_ID, "1001");
     tableConfig.setIngestionConfig(new IngestionConfig(
-        new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride),
-            _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
-            _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null,
-        _ingestionConfig.getTransformConfigs(), null));
+        new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride), _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
+            _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null, _ingestionConfig.getTransformConfigs(), null));
     segmentWriter.init(tableConfig, _schema);
 
     // write 2 records
@@ -331,12 +307,10 @@ public class FileBasedSegmentWriterTest {
     Map<String, String> batchConfigMap = _ingestionConfig.getBatchIngestionConfig().getBatchConfigMaps().get(0);
     Map<String, String> batchConfigMapOverride = new HashMap<>(batchConfigMap);
     batchConfigMapOverride.put(BatchConfigProperties.OVERWRITE_OUTPUT, "true");
-    TableConfig tableConfig =
-        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
-            .setIngestionConfig(new IngestionConfig(new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride),
-                _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
-                _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null,
-                _ingestionConfig.getTransformConfigs(), null)).build();
+    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME).setIngestionConfig(
+        new IngestionConfig(
+            new BatchIngestionConfig(Lists.newArrayList(batchConfigMapOverride), _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionType(),
+                _ingestionConfig.getBatchIngestionConfig().getSegmentIngestionFrequency()), null, null, _ingestionConfig.getTransformConfigs(), null)).build();
     segmentWriter.init(tableConfig, _schema);
 
     // write 3 records with same timestamp
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/pom.xml b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/pom.xml
index 34b31f1..aa7cca8 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/pom.xml
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/pom.xml
@@ -38,9 +38,6 @@
     <kafka.lib.version>0.9.0.1</kafka.lib.version>
     <kafka.scala.version>2.10</kafka.scala.version>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencies>
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/ConsumerAndIterator.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/ConsumerAndIterator.java
index 24c1e08..5d79dfd 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/ConsumerAndIterator.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/ConsumerAndIterator.java
@@ -27,15 +27,16 @@ import kafka.javaapi.consumer.ConsumerConnector;
  * Immutable tuple object for a Kafka consumer and stream iterator.
  */
 public class ConsumerAndIterator {
+  private static final AtomicLong ID_GENERATOR = new AtomicLong(0L);
+
   private final ConsumerConnector _consumer;
   private final ConsumerIterator<byte[], byte[]> _iterator;
   private final long _id;
-  private static final AtomicLong idGenerator = new AtomicLong(0L);
 
   ConsumerAndIterator(final ConsumerConnector consumer, final ConsumerIterator<byte[], byte[]> iterator) {
     _consumer = consumer;
     _iterator = iterator;
-    _id = idGenerator.getAndIncrement();
+    _id = ID_GENERATOR.getAndIncrement();
   }
 
   public ConsumerConnector getConsumer() {
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaBrokerWrapper.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaBrokerWrapper.java
index 6c130d6..2f796a1 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaBrokerWrapper.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaBrokerWrapper.java
@@ -47,7 +47,7 @@ public class KafkaBrokerWrapper {
   private final Object _kafkaBroker;
 
   public KafkaBrokerWrapper(Object kafkaBroker) {
-    this._kafkaBroker = kafkaBroker;
+    _kafkaBroker = kafkaBroker;
   }
 
   public String host() {
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConnectionHandler.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConnectionHandler.java
index 5bfb34b..5ac9886 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConnectionHandler.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConnectionHandler.java
@@ -75,7 +75,7 @@ public class KafkaConnectionHandler {
 
   final Random _random = new Random();
 
-  boolean isPartitionProvided;
+  boolean _isPartitionProvided;
 
   private final KafkaLowLevelStreamConfig _kafkaLowLevelStreamConfig;
 
@@ -118,7 +118,7 @@ public class KafkaConnectionHandler {
     _connectTimeoutMillis = streamConfig.getConnectionTimeoutMillis();
     _simpleConsumer = null;
 
-    isPartitionProvided = false;
+    _isPartitionProvided = false;
     _partition = Integer.MIN_VALUE;
 
     _bufferSize = _kafkaLowLevelStreamConfig.getKafkaBufferSize();
@@ -143,7 +143,7 @@ public class KafkaConnectionHandler {
     _connectTimeoutMillis = streamConfig.getConnectionTimeoutMillis();
     _simpleConsumer = null;
 
-    isPartitionProvided = true;
+    _isPartitionProvided = true;
     _partition = partition;
 
     _bufferSize = _kafkaLowLevelStreamConfig.getKafkaBufferSize();
@@ -187,10 +187,10 @@ public class KafkaConnectionHandler {
   }
 
   abstract class State {
-    private ConsumerState stateValue;
+    private ConsumerState _stateValue;
 
     protected State(ConsumerState stateValue) {
-      this.stateValue = stateValue;
+      _stateValue = stateValue;
     }
 
     abstract void process();
@@ -209,7 +209,7 @@ public class KafkaConnectionHandler {
     }
 
     ConsumerState getStateValue() {
-      return stateValue;
+      return _stateValue;
     }
   }
 
@@ -256,7 +256,7 @@ public class KafkaConnectionHandler {
 
     @Override
     void process() {
-      if (isPartitionProvided) {
+      if (_isPartitionProvided) {
         // If we're consuming from a partition, we need to find the leader so that we can consume from it. By design,
         // Kafka only allows consumption from the leader and not one of the in-sync replicas.
         setCurrentState(new FetchingLeaderInformation());
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConsumerManager.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConsumerManager.java
index 4fe79f0..e40b9b4 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConsumerManager.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaConsumerManager.java
@@ -57,6 +57,8 @@ import org.slf4j.LoggerFactory;
  * Kafka APIs.
  */
 public class KafkaConsumerManager {
+  private KafkaConsumerManager() {
+  }
 
   private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerManager.class);
   private static final Long IN_USE = -1L;
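
Aside, not part of the commit: the KafkaConsumerManager hunk above adds a private constructor, the usual way to keep a static-only utility class from being instantiated (checkstyle's HideUtilityClassConstructor check flags classes without one). A minimal sketch of the pattern on a hypothetical class, not a Pinot API:

    public final class SegmentNameUtils {
      private SegmentNameUtils() {
        // Private constructor: the class only exposes static helpers and is never instantiated.
      }

      // Composes a segment name from a table name and a sequence id.
      public static String compose(String tableName, int sequenceId) {
        return tableName + "__" + sequenceId;
      }
    }
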
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaHighLevelStreamConfig.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaHighLevelStreamConfig.java
index 45cb649..8a1f585 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaHighLevelStreamConfig.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaHighLevelStreamConfig.java
@@ -40,20 +40,20 @@ public class KafkaHighLevelStreamConfig {
   private static final String DEFAULT_REBALANCE_BACKOFF_MS = "2000";
   private static final String DEFAULT_AUTO_COMMIT_ENABLE = "false";
 
-  private static final Map<String, String> defaultProps;
+  private static final Map<String, String> DEFAULT_PROPS;
 
   static {
-    defaultProps = new HashMap<>();
-    defaultProps
+    DEFAULT_PROPS = new HashMap<>();
+    DEFAULT_PROPS
         .put(KafkaStreamConfigProperties.HighLevelConsumer.ZK_SESSION_TIMEOUT_MS, DEFAULT_ZK_SESSION_TIMEOUT_MS);
-    defaultProps
+    DEFAULT_PROPS
         .put(KafkaStreamConfigProperties.HighLevelConsumer.ZK_CONNECTION_TIMEOUT_MS, DEFAULT_ZK_CONNECTION_TIMEOUT_MS);
-    defaultProps.put(KafkaStreamConfigProperties.HighLevelConsumer.ZK_SYNC_TIME_MS, DEFAULT_ZK_SYNC_TIME);
+    DEFAULT_PROPS.put(KafkaStreamConfigProperties.HighLevelConsumer.ZK_SYNC_TIME_MS, DEFAULT_ZK_SYNC_TIME);
     // Rebalance retries will take up to 1 mins to fail.
-    defaultProps
+    DEFAULT_PROPS
         .put(KafkaStreamConfigProperties.HighLevelConsumer.REBALANCE_MAX_RETRIES, DEFAULT_REBALANCE_MAX_RETRIES);
-    defaultProps.put(KafkaStreamConfigProperties.HighLevelConsumer.REBALANCE_BACKOFF_MS, DEFAULT_REBALANCE_BACKOFF_MS);
-    defaultProps.put(KafkaStreamConfigProperties.HighLevelConsumer.AUTO_COMMIT_ENABLE, DEFAULT_AUTO_COMMIT_ENABLE);
+    DEFAULT_PROPS.put(KafkaStreamConfigProperties.HighLevelConsumer.REBALANCE_BACKOFF_MS, DEFAULT_REBALANCE_BACKOFF_MS);
+    DEFAULT_PROPS.put(KafkaStreamConfigProperties.HighLevelConsumer.AUTO_COMMIT_ENABLE, DEFAULT_AUTO_COMMIT_ENABLE);
   }
 
   private String _kafkaTopicName;
@@ -115,8 +115,8 @@ public class KafkaHighLevelStreamConfig {
 
   public ConsumerConfig getKafkaConsumerConfig() {
     Properties props = new Properties();
-    for (String key : defaultProps.keySet()) {
-      props.put(key, defaultProps.get(key));
+    for (String key : DEFAULT_PROPS.keySet()) {
+      props.put(key, DEFAULT_PROPS.get(key));
     }
     for (String key : _kafkaConsumerProperties.keySet()) {
       props.put(key, _kafkaConsumerProperties.get(key));
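
Aside, not part of the commit: the KafkaHighLevelStreamConfig hunk renames the static default-property map to UPPER_SNAKE_CASE. A small self-contained sketch of the same pattern, with hypothetical names and property keys: a static final map is filled in a static initializer and then overlaid with caller-supplied entries.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    public class ConsumerDefaults {
      // Static constants are named in UPPER_SNAKE_CASE per the checkstyle convention.
      private static final Map<String, String> DEFAULT_PROPS = new HashMap<>();

      static {
        DEFAULT_PROPS.put("zookeeper.session.timeout.ms", "30000");
        DEFAULT_PROPS.put("auto.commit.enable", "false");
      }

      // Defaults are applied first, then overridden by caller-supplied entries.
      public static Properties merge(Map<String, String> overrides) {
        Properties props = new Properties();
        props.putAll(DEFAULT_PROPS);
        props.putAll(overrides);
        return props;
      }
    }
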
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamLevelConsumer.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamLevelConsumer.java
index 5b7ee2f..49e3929 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamLevelConsumer.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamLevelConsumer.java
@@ -36,7 +36,7 @@ import org.slf4j.LoggerFactory;
 public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
 
   private StreamMessageDecoder _messageDecoder;
-  private Logger INSTANCE_LOGGER;
+  private Logger _instanceLogger;
 
   private String _clientId;
   private String _tableAndStreamName;
@@ -44,12 +44,12 @@ public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
   private StreamConfig _streamConfig;
   private KafkaHighLevelStreamConfig _kafkaHighLevelStreamConfig;
 
-  private ConsumerConnector consumer;
-  private ConsumerIterator<byte[], byte[]> kafkaIterator;
-  private ConsumerAndIterator consumerAndIterator;
-  private long lastLogTime = 0;
-  private long lastCount = 0;
-  private long currentCount = 0L;
+  private ConsumerConnector _consumer;
+  private ConsumerIterator<byte[], byte[]> _kafkaIterator;
+  private ConsumerAndIterator _consumerAndIterator;
+  private long _lastLogTime = 0;
+  private long _lastCount = 0;
+  private long _currentCount = 0L;
 
   public KafkaStreamLevelConsumer(String clientId, String tableName, StreamConfig streamConfig,
       Set<String> fieldsToRead, String groupId) {
@@ -60,41 +60,41 @@ public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
     _messageDecoder = StreamDecoderProvider.create(streamConfig, fieldsToRead);
 
     _tableAndStreamName = tableName + "-" + streamConfig.getTopicName();
-    INSTANCE_LOGGER = LoggerFactory
+    _instanceLogger = LoggerFactory
         .getLogger(KafkaStreamLevelConsumer.class.getName() + "_" + tableName + "_" + streamConfig.getTopicName());
   }
 
   @Override
   public void start()
       throws Exception {
-    consumerAndIterator = KafkaConsumerManager.acquireConsumerAndIteratorForConfig(_kafkaHighLevelStreamConfig);
-    kafkaIterator = consumerAndIterator.getIterator();
-    consumer = consumerAndIterator.getConsumer();
+    _consumerAndIterator = KafkaConsumerManager.acquireConsumerAndIteratorForConfig(_kafkaHighLevelStreamConfig);
+    _kafkaIterator = _consumerAndIterator.getIterator();
+    _consumer = _consumerAndIterator.getConsumer();
   }
 
   @Override
   public GenericRow next(GenericRow destination) {
 
-    if (kafkaIterator.hasNext()) {
+    if (_kafkaIterator.hasNext()) {
       try {
-        destination = _messageDecoder.decode(kafkaIterator.next().message(), destination);
-        ++currentCount;
+        destination = _messageDecoder.decode(_kafkaIterator.next().message(), destination);
+        ++_currentCount;
 
         final long now = System.currentTimeMillis();
         // Log every minute or 100k events
-        if (now - lastLogTime > 60000 || currentCount - lastCount >= 100000) {
-          if (lastCount == 0) {
-            INSTANCE_LOGGER.info("Consumed {} events from kafka stream {}", currentCount, _streamConfig.getTopicName());
+        if (now - _lastLogTime > 60000 || _currentCount - _lastCount >= 100000) {
+          if (_lastCount == 0) {
+            _instanceLogger.info("Consumed {} events from kafka stream {}", _currentCount, _streamConfig.getTopicName());
           } else {
-            INSTANCE_LOGGER.info("Consumed {} events from kafka stream {} (rate:{}/s)", currentCount - lastCount,
-                _streamConfig.getTopicName(), (float) (currentCount - lastCount) * 1000 / (now - lastLogTime));
+            _instanceLogger.info("Consumed {} events from kafka stream {} (rate:{}/s)", _currentCount - _lastCount,
+                _streamConfig.getTopicName(), (float) (_currentCount - _lastCount) * 1000 / (now - _lastLogTime));
           }
-          lastCount = currentCount;
-          lastLogTime = now;
+          _lastCount = _currentCount;
+          _lastLogTime = now;
         }
         return destination;
       } catch (Exception e) {
-        INSTANCE_LOGGER.warn("Caught exception while consuming events", e);
+        _instanceLogger.warn("Caught exception while consuming events", e);
         throw e;
       }
     }
@@ -103,18 +103,18 @@ public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
 
   @Override
   public void commit() {
-    consumer.commitOffsets();
+    _consumer.commitOffsets();
   }
 
   @Override
   public void shutdown()
       throws Exception {
-    if (consumerAndIterator != null) {
-      kafkaIterator = null;
-      consumer = null;
+    if (_consumerAndIterator != null) {
+      _kafkaIterator = null;
+      _consumer = null;
 
-      KafkaConsumerManager.releaseConsumerAndIterator(consumerAndIterator);
-      consumerAndIterator = null;
+      KafkaConsumerManager.releaseConsumerAndIterator(_consumerAndIterator);
+      _consumerAndIterator = null;
     }
   }
 }
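
The consumer above throttles its own throughput logging: it counts every decoded message but emits a log line at most once per minute or once per 100k events, and reports a rate once a previous checkpoint exists. The same bookkeeping, pulled out into a small stand-alone helper (class and method names are hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ConsumptionRateLogger {
      private static final Logger LOGGER = LoggerFactory.getLogger(ConsumptionRateLogger.class);
      private static final long LOG_INTERVAL_MS = 60_000L;
      private static final long LOG_EVERY_N_EVENTS = 100_000L;

      private long _lastLogTime = 0;
      private long _lastCount = 0;
      private long _currentCount = 0;

      // Call once per consumed event; logs at most once per minute or per 100k events.
      public void recordEvent(String topic) {
        _currentCount++;
        long now = System.currentTimeMillis();
        if (now - _lastLogTime > LOG_INTERVAL_MS || _currentCount - _lastCount >= LOG_EVERY_N_EVENTS) {
          if (_lastCount == 0) {
            LOGGER.info("Consumed {} events from stream {}", _currentCount, topic);
          } else {
            LOGGER.info("Consumed {} events from stream {} (rate: {}/s)", _currentCount - _lastCount, topic,
                (float) (_currentCount - _lastCount) * 1000 / (now - _lastLogTime));
          }
          _lastCount = _currentCount;
          _lastLogTime = now;
        }
      }
    }
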
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamMetadataProvider.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamMetadataProvider.java
index 06ee697..2ef6023 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamMetadataProvider.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/KafkaStreamMetadataProvider.java
@@ -86,9 +86,9 @@ public class KafkaStreamMetadataProvider extends KafkaConnectionHandler implemen
   @Override
   public synchronized int fetchPartitionCount(long timeoutMillis) {
     int unknownTopicReplyCount = 0;
-    final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10;
+    final int maxUnknownTopicReplyCount = 10;
     int kafkaErrorCount = 0;
-    final int MAX_KAFKA_ERROR_COUNT = 10;
+    final int maxKafkaErrorCount = 10;
 
     final long endTime = System.currentTimeMillis() + timeoutMillis;
 
@@ -123,7 +123,7 @@ public class KafkaStreamMetadataProvider extends KafkaConnectionHandler implemen
       } else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) {
         throw new RuntimeException("Invalid topic name " + _topic);
       } else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
-        if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) {
+        if (maxUnknownTopicReplyCount < unknownTopicReplyCount) {
           throw new RuntimeException("Topic " + _topic + " does not exist");
         } else {
           // Kafka topic creation can sometimes take some time, so we'll retry after a little bit
@@ -134,7 +134,7 @@ public class KafkaStreamMetadataProvider extends KafkaConnectionHandler implemen
         // Retry after a short delay
         kafkaErrorCount++;
 
-        if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
+        if (maxKafkaErrorCount < kafkaErrorCount) {
           throw exceptionForKafkaErrorCode(errorCode);
         }
 
@@ -162,7 +162,7 @@ public class KafkaStreamMetadataProvider extends KafkaConnectionHandler implemen
   @Override
   public synchronized StreamPartitionMsgOffset fetchStreamPartitionOffset(@Nonnull OffsetCriteria offsetCriteria, long timeoutMillis)
       throws java.util.concurrent.TimeoutException {
-    Preconditions.checkState(isPartitionProvided,
+    Preconditions.checkState(_isPartitionProvided,
         "Cannot fetch partition offset. StreamMetadataProvider created without partition information");
     Preconditions.checkNotNull(offsetCriteria);
 
@@ -176,7 +176,7 @@ public class KafkaStreamMetadataProvider extends KafkaConnectionHandler implemen
     }
 
     int kafkaErrorCount = 0;
-    final int MAX_KAFKA_ERROR_COUNT = 10;
+    final int maxKafkaErrorCount = 10;
 
     final long endTime = System.currentTimeMillis() + timeoutMillis;
 
@@ -219,7 +219,7 @@ public class KafkaStreamMetadataProvider extends KafkaConnectionHandler implemen
         // Retry after a short delay
         kafkaErrorCount++;
 
-        if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
+        if (maxKafkaErrorCount < kafkaErrorCount) {
           throw exceptionForKafkaErrorCode(errorCode);
         }
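
fetchPartitionCount and fetchStreamPartitionOffset above both cap their retries with a local error counter and a deadline, lower-casing the former MAX_KAFKA_ERROR_COUNT locals along the way. A generic sketch of that bounded-retry shape, not the Pinot API (all names below are invented for illustration):

    import java.util.concurrent.TimeoutException;

    public final class BoundedRetry {
      private BoundedRetry() {
      }

      public interface Attempt<T> {
        T tryOnce() throws Exception;
      }

      // Retries tryOnce() until it succeeds, the error cap is exceeded, or the deadline passes.
      public static <T> T run(Attempt<T> attempt, int maxErrors, long timeoutMillis)
          throws Exception {
        long endTime = System.currentTimeMillis() + timeoutMillis;
        int errorCount = 0;
        Exception lastError = null;
        while (System.currentTimeMillis() < endTime) {
          try {
            return attempt.tryOnce();
          } catch (Exception e) {
            lastError = e;
            errorCount++;
            if (errorCount > maxErrors) {
              throw e;  // Give up once the cap is exceeded, like maxKafkaErrorCount above.
            }
            Thread.sleep(500);  // Short back-off before the next attempt.
          }
        }
        if (lastError != null) {
          throw lastError;
        }
        throw new TimeoutException("No attempt succeeded within " + timeoutMillis + " ms");
      }
    }
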
 
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/SimpleConsumerMessageBatch.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/SimpleConsumerMessageBatch.java
index ab333de..207e43b 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/SimpleConsumerMessageBatch.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/SimpleConsumerMessageBatch.java
@@ -27,28 +27,28 @@ import org.apache.pinot.spi.stream.StreamPartitionMsgOffset;
 
 public class SimpleConsumerMessageBatch implements MessageBatch<byte[]> {
 
-  private ArrayList<MessageAndOffset> messageList = new ArrayList<>();
+  private ArrayList<MessageAndOffset> _messageList = new ArrayList<>();
 
   public SimpleConsumerMessageBatch(Iterable<MessageAndOffset> messageAndOffsetIterable) {
     for (MessageAndOffset messageAndOffset : messageAndOffsetIterable) {
-      messageList.add(messageAndOffset);
+      _messageList.add(messageAndOffset);
     }
   }
 
   public int getMessageCount() {
-    return messageList.size();
+    return _messageList.size();
   }
 
   public byte[] getMessageAtIndex(int index) {
-    return messageList.get(index).message().payload().array();
+    return _messageList.get(index).message().payload().array();
   }
 
   public int getMessageOffsetAtIndex(int index) {
-    return messageList.get(index).message().payload().arrayOffset();
+    return _messageList.get(index).message().payload().arrayOffset();
   }
 
   public int getMessageLengthAtIndex(int index) {
-    return messageList.get(index).message().payloadSize();
+    return _messageList.get(index).message().payloadSize();
   }
 
   @Override
@@ -57,6 +57,6 @@ public class SimpleConsumerMessageBatch implements MessageBatch<byte[]> {
   }
 
   public StreamPartitionMsgOffset getNextStreamParitionMsgOffsetAtIndex(int index) {
-    return new LongMsgOffset(messageList.get(index).nextOffset());
+    return new LongMsgOffset(_messageList.get(index).nextOffset());
   }
 }
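
SimpleConsumerMessageBatch materializes the iterable of Kafka messages into a list up front so that the payload, offset, and length accessors can address messages by index in constant time. A stripped-down sketch of the same wrapper idea over plain byte arrays (the type below is invented, not the MessageBatch SPI):

    import java.util.ArrayList;
    import java.util.List;

    public class IndexedBatch {
      // Copied once in the constructor so index-based access is O(1).
      private final List<byte[]> _messages = new ArrayList<>();

      public IndexedBatch(Iterable<byte[]> messages) {
        for (byte[] message : messages) {
          _messages.add(message);
        }
      }

      public int getMessageCount() {
        return _messages.size();
      }

      public byte[] getMessageAtIndex(int index) {
        return _messages.get(index);
      }
    }
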
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataProducer.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataProducer.java
index 46aacce..a1e48af 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataProducer.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataProducer.java
@@ -26,12 +26,12 @@ import org.apache.pinot.spi.stream.StreamDataProducer;
 
 
 public class KafkaDataProducer implements StreamDataProducer {
-  Producer<byte[], byte[]> producer;
+  private Producer<byte[], byte[]> _producer;
 
   @Override
   public void init(Properties props) {
     ProducerConfig producerConfig = new ProducerConfig(props);
-    this.producer = new Producer(producerConfig);
+    _producer = new Producer(producerConfig);
   }
 
   @Override
@@ -47,11 +47,11 @@ public class KafkaDataProducer implements StreamDataProducer {
   }
 
   public void produce(KeyedMessage message) {
-    producer.send(message);
+    _producer.send(message);
   }
 
   @Override
   public void close() {
-    producer.close();
+    _producer.close();
   }
 }
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataServerStartable.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataServerStartable.java
index 6644981..897f302 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataServerStartable.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/main/java/org/apache/pinot/plugin/stream/kafka09/server/KafkaDataServerStartable.java
@@ -105,7 +105,7 @@ public class KafkaDataServerStartable implements StreamDataServerStartable {
   @Override
   public void createTopic(String topic, Properties props) {
     invokeTopicCommand(
-        new String[]{"--create", "--zookeeper", this._zkStr, "--replication-factor", "1", "--partitions", Integer.toString(
+        new String[]{"--create", "--zookeeper", _zkStr, "--replication-factor", "1", "--partitions", Integer.toString(
             (Integer) props.get("partition")), "--topic", topic});
   }
 
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaLowLevelStreamConfigTest.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaLowLevelStreamConfigTest.java
index 440a985..a635646 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaLowLevelStreamConfigTest.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaLowLevelStreamConfigTest.java
@@ -32,7 +32,7 @@ public class KafkaLowLevelStreamConfigTest {
       "org.apache.pinot.plugin.inputformat.avro.KafkaAvroMessageDecoder";
 
   private KafkaLowLevelStreamConfig getStreamConfig(String topic, String bootstrapHosts, String buffer,
-                                                    String socketTimeout) {
+      String socketTimeout) {
     return getStreamConfig(topic, bootstrapHosts, buffer, socketTimeout, null, null);
   }
 
@@ -126,7 +126,7 @@ public class KafkaLowLevelStreamConfigTest {
   @Test
   public void testGetFetcherSize() {
     // test default
-    KafkaLowLevelStreamConfig config = getStreamConfig("topic", "host1", "", "", "",null);
+    KafkaLowLevelStreamConfig config = getStreamConfig("topic", "host1", "", "", "", null);
     Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_BUFFER_SIZE_DEFAULT,
         config.getKafkaFetcherSizeBytes());
 
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaPartitionLevelConsumerTest.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaPartitionLevelConsumerTest.java
index beb82e5..e71d475 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaPartitionLevelConsumerTest.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-0.9/src/test/java/org/apache/pinot/plugin/stream/kafka09/KafkaPartitionLevelConsumerTest.java
@@ -56,45 +56,45 @@ import scala.collection.immutable.List;
 public class KafkaPartitionLevelConsumerTest {
   public class MockKafkaSimpleConsumerFactory implements KafkaSimpleConsumerFactory {
 
-    private String[] hosts;
-    private int[] ports;
-    private int[] partitionLeaderIndices;
-    private int brokerCount;
-    private int partitionCount;
-    private String topicName;
-    private BrokerEndPoint[] brokerArray;
+    private String[] _hosts;
+    private int[] _ports;
+    private int[] _partitionLeaderIndices;
+    private int _brokerCount;
+    private int _partitionCount;
+    private String _topicName;
+    private BrokerEndPoint[] _brokerArray;
 
     public MockKafkaSimpleConsumerFactory(String[] hosts, int[] ports, long[] partitionStartOffsets,
         long[] partitionEndOffsets, int[] partitionLeaderIndices, String topicName) {
       Preconditions.checkArgument(hosts.length == ports.length);
-      this.hosts = hosts;
-      this.ports = ports;
-      brokerCount = hosts.length;
+      _hosts = hosts;
+      _ports = ports;
+      _brokerCount = hosts.length;
 
-      brokerArray = new BrokerEndPoint[brokerCount];
-      for (int i = 0; i < brokerCount; i++) {
-        brokerArray[i] = new BrokerEndPoint(i, hosts[i], ports[i]);
+      _brokerArray = new BrokerEndPoint[_brokerCount];
+      for (int i = 0; i < _brokerCount; i++) {
+        _brokerArray[i] = new BrokerEndPoint(i, hosts[i], ports[i]);
       }
 
       Preconditions.checkArgument(partitionStartOffsets.length == partitionEndOffsets.length);
       Preconditions.checkArgument(partitionStartOffsets.length == partitionLeaderIndices.length);
-      this.partitionLeaderIndices = partitionLeaderIndices;
-      partitionCount = partitionStartOffsets.length;
+      _partitionLeaderIndices = partitionLeaderIndices;
+      _partitionCount = partitionStartOffsets.length;
 
-      this.topicName = topicName;
+      _topicName = topicName;
     }
 
     private class MockFetchResponse extends FetchResponse {
-      java.util.Map<TopicAndPartition, Short> errorMap;
+      java.util.Map<TopicAndPartition, Short> _errorMap;
 
       public MockFetchResponse(java.util.Map<TopicAndPartition, Short> errorMap) {
         super(null);
-        this.errorMap = errorMap;
+        _errorMap = errorMap;
       }
 
       @Override
       public ByteBufferMessageSet messageSet(String topic, int partition) {
-        if (errorMap.containsKey(new TopicAndPartition(topic, partition))) {
+        if (_errorMap.containsKey(new TopicAndPartition(topic, partition))) {
           throw new IllegalArgumentException();
         } else {
           // TODO Maybe generate dummy messages here?
@@ -105,8 +105,8 @@ public class KafkaPartitionLevelConsumerTest {
       @Override
       public short errorCode(String topic, int partition) {
         TopicAndPartition key = new TopicAndPartition(topic, partition);
-        if (errorMap.containsKey(key)) {
-          return errorMap.get(key);
+        if (_errorMap.containsKey(key)) {
+          return _errorMap.get(key);
         } else {
           return Errors.NONE.code();
         }
@@ -118,16 +118,16 @@ public class KafkaPartitionLevelConsumerTest {
       }
 
       public boolean hasError() {
-        return !errorMap.isEmpty();
+        return !_errorMap.isEmpty();
       }
     }
 
     private class MockSimpleConsumer extends SimpleConsumer {
-      private int index;
+      private int _index;
 
       public MockSimpleConsumer(String host, int port, int soTimeout, int bufferSize, String clientId, int index) {
         super(host, port, soTimeout, bufferSize, clientId);
-        this.index = index;
+        _index = index;
       }
 
       @Override
@@ -143,11 +143,11 @@ public class KafkaPartitionLevelConsumerTest {
           TopicAndPartition topicAndPartition = t2._1();
           PartitionFetchInfo partitionFetchInfo = t2._2();
 
-          if (!topicAndPartition.topic().equals(topicName)) {
+          if (!topicAndPartition.topic().equals(_topicName)) {
             errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
-          } else if (partitionLeaderIndices.length < topicAndPartition.partition()) {
+          } else if (_partitionLeaderIndices.length < topicAndPartition.partition()) {
             errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
-          } else if (partitionLeaderIndices[topicAndPartition.partition()] != index) {
+          } else if (_partitionLeaderIndices[topicAndPartition.partition()] != _index) {
             errorMap.put(topicAndPartition, Errors.NOT_LEADER_FOR_PARTITION.code());
           } else {
             // Do nothing, we'll generate a fake message
@@ -176,15 +176,15 @@ public class KafkaPartitionLevelConsumerTest {
 
         for (int i = 0; i < topicMetadataArray.length; i++) {
           String topic = topics.get(i);
-          if (!topic.equals(topicName)) {
+          if (!topic.equals(_topicName)) {
             topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
           } else {
-            PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
-            for (int j = 0; j < partitionCount; j++) {
+            PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[_partitionCount];
+            for (int j = 0; j < _partitionCount; j++) {
               java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
               List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
               partitionMetadataArray[j] =
-                  new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList,
+                  new PartitionMetadata(j, Some.apply(_brokerArray[_partitionLeaderIndices[j]]), emptyScalaList,
                       emptyScalaList, Errors.NONE.code());
             }
 
@@ -193,7 +193,7 @@ public class KafkaPartitionLevelConsumerTest {
           }
         }
 
-        Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
+        Seq<BrokerEndPoint> brokers = List.fromArray(_brokerArray);
         Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);
 
         return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
@@ -202,8 +202,8 @@ public class KafkaPartitionLevelConsumerTest {
 
     @Override
     public SimpleConsumer buildSimpleConsumer(String host, int port, int soTimeout, int bufferSize, String clientId) {
-      for (int i = 0; i < brokerCount; i++) {
-        if (hosts[i].equalsIgnoreCase(host) && ports[i] == port) {
+      for (int i = 0; i < _brokerCount; i++) {
+        if (_hosts[i].equalsIgnoreCase(host) && _ports[i] == port) {
           return new MockSimpleConsumer(host, port, soTimeout, bufferSize, clientId, i);
         }
       }
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/pom.xml b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/pom.xml
index 228a4e8..fc6429a 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/pom.xml
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/pom.xml
@@ -37,9 +37,6 @@
     <pinot.root>${basedir}/../../..</pinot.root>
     <kafka.lib.version>2.0.0</kafka.lib.version>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencies>
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaMessageBatch.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaMessageBatch.java
index 82b25a3..db91da7 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaMessageBatch.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaMessageBatch.java
@@ -30,32 +30,32 @@ import org.apache.pinot.spi.stream.StreamPartitionMsgOffset;
 
 public class KafkaMessageBatch implements MessageBatch<byte[]> {
 
-  private List<MessageAndOffset> messageList = new ArrayList<>();
+  private List<MessageAndOffset> _messageList = new ArrayList<>();
 
   public KafkaMessageBatch(Iterable<ConsumerRecord<String, Bytes>> iterable) {
     for (ConsumerRecord<String, Bytes> record : iterable) {
-      messageList.add(new MessageAndOffset(record.value().get(), record.offset()));
+      _messageList.add(new MessageAndOffset(record.value().get(), record.offset()));
     }
   }
 
   @Override
   public int getMessageCount() {
-    return messageList.size();
+    return _messageList.size();
   }
 
   @Override
   public byte[] getMessageAtIndex(int index) {
-    return messageList.get(index).getMessage().array();
+    return _messageList.get(index).getMessage().array();
   }
 
   @Override
   public int getMessageOffsetAtIndex(int index) {
-    return messageList.get(index).getMessage().arrayOffset();
+    return _messageList.get(index).getMessage().arrayOffset();
   }
 
   @Override
   public int getMessageLengthAtIndex(int index) {
-    return messageList.get(index).payloadSize();
+    return _messageList.get(index).payloadSize();
   }
 
   @Override
@@ -65,6 +65,6 @@ public class KafkaMessageBatch implements MessageBatch<byte[]> {
 
   @Override
   public StreamPartitionMsgOffset getNextStreamParitionMsgOffsetAtIndex(int index) {
-    return new LongMsgOffset(messageList.get(index).getNextOffset());
+    return new LongMsgOffset(_messageList.get(index).getNextOffset());
   }
 }
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumer.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumer.java
index 56e062f..83d0c83 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumer.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumer.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
 public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
 
   private StreamMessageDecoder _messageDecoder;
-  private Logger INSTANCE_LOGGER;
+  private Logger _instanceLogger;
 
   private String _clientId;
   private String _tableAndStreamName;
@@ -52,14 +52,14 @@ public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
   private StreamConfig _streamConfig;
   private KafkaStreamLevelStreamConfig _kafkaStreamLevelStreamConfig;
 
-  private KafkaConsumer<Bytes, Bytes> consumer;
-  private ConsumerRecords<Bytes, Bytes> consumerRecords;
-  private Iterator<ConsumerRecord<Bytes, Bytes>> kafkaIterator;
-  private Map<Integer, Long> consumerOffsets = new HashMap<>(); // tracking current consumed records offsets.
+  private KafkaConsumer<Bytes, Bytes> _consumer;
+  private ConsumerRecords<Bytes, Bytes> _consumerRecords;
+  private Iterator<ConsumerRecord<Bytes, Bytes>> _kafkaIterator;
+  private Map<Integer, Long> _consumerOffsets = new HashMap<>(); // tracking current consumed records offsets.
 
-  private long lastLogTime = 0;
-  private long lastCount = 0;
-  private long currentCount = 0L;
+  private long _lastLogTime = 0;
+  private long _lastCount = 0;
+  private long _currentCount = 0L;
 
   public KafkaStreamLevelConsumer(String clientId, String tableName, StreamConfig streamConfig,
       Set<String> sourceFields, String groupId) {
@@ -70,57 +70,57 @@ public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
     _messageDecoder = StreamDecoderProvider.create(streamConfig, sourceFields);
 
     _tableAndStreamName = tableName + "-" + streamConfig.getTopicName();
-    INSTANCE_LOGGER = LoggerFactory
+    _instanceLogger = LoggerFactory
         .getLogger(KafkaStreamLevelConsumer.class.getName() + "_" + tableName + "_" + streamConfig.getTopicName());
-    INSTANCE_LOGGER.info("KafkaStreamLevelConsumer: streamConfig : {}", _streamConfig);
+    _instanceLogger.info("KafkaStreamLevelConsumer: streamConfig : {}", _streamConfig);
   }
 
   @Override
   public void start()
       throws Exception {
-    consumer = KafkaStreamLevelConsumerManager.acquireKafkaConsumerForConfig(_kafkaStreamLevelStreamConfig);
+    _consumer = KafkaStreamLevelConsumerManager.acquireKafkaConsumerForConfig(_kafkaStreamLevelStreamConfig);
   }
 
   private void updateKafkaIterator() {
-    consumerRecords = consumer.poll(Duration.ofMillis(_streamConfig.getFetchTimeoutMillis()));
-    kafkaIterator = consumerRecords.iterator();
+    _consumerRecords = _consumer.poll(Duration.ofMillis(_streamConfig.getFetchTimeoutMillis()));
+    _kafkaIterator = _consumerRecords.iterator();
   }
 
   private void resetOffsets() {
-    for (int partition : consumerOffsets.keySet()) {
-      long offsetToSeek = consumerOffsets.get(partition);
-      consumer.seek(new TopicPartition(_streamConfig.getTopicName(), partition), offsetToSeek);
+    for (int partition : _consumerOffsets.keySet()) {
+      long offsetToSeek = _consumerOffsets.get(partition);
+      _consumer.seek(new TopicPartition(_streamConfig.getTopicName(), partition), offsetToSeek);
     }
   }
 
   @Override
   public GenericRow next(GenericRow destination) {
-    if (kafkaIterator == null || !kafkaIterator.hasNext()) {
+    if (_kafkaIterator == null || !_kafkaIterator.hasNext()) {
       updateKafkaIterator();
     }
-    if (kafkaIterator.hasNext()) {
+    if (_kafkaIterator.hasNext()) {
       try {
-        final ConsumerRecord<Bytes, Bytes> record = kafkaIterator.next();
+        final ConsumerRecord<Bytes, Bytes> record = _kafkaIterator.next();
         updateOffsets(record.partition(), record.offset());
         destination = _messageDecoder.decode(record.value().get(), destination);
 
-        ++currentCount;
+        ++_currentCount;
 
         final long now = System.currentTimeMillis();
         // Log every minute or 100k events
-        if (now - lastLogTime > 60000 || currentCount - lastCount >= 100000) {
-          if (lastCount == 0) {
-            INSTANCE_LOGGER.info("Consumed {} events from kafka stream {}", currentCount, _streamConfig.getTopicName());
+        if (now - _lastLogTime > 60000 || _currentCount - _lastCount >= 100000) {
+          if (_lastCount == 0) {
+            _instanceLogger.info("Consumed {} events from kafka stream {}", _currentCount, _streamConfig.getTopicName());
           } else {
-            INSTANCE_LOGGER.info("Consumed {} events from kafka stream {} (rate:{}/s)", currentCount - lastCount,
-                _streamConfig.getTopicName(), (float) (currentCount - lastCount) * 1000 / (now - lastLogTime));
+            _instanceLogger.info("Consumed {} events from kafka stream {} (rate:{}/s)", _currentCount - _lastCount,
+                _streamConfig.getTopicName(), (float) (_currentCount - _lastCount) * 1000 / (now - _lastLogTime));
           }
-          lastCount = currentCount;
-          lastLogTime = now;
+          _lastCount = _currentCount;
+          _lastLogTime = now;
         }
         return destination;
       } catch (Exception e) {
-        INSTANCE_LOGGER.warn("Caught exception while consuming events", e);
+        _instanceLogger.warn("Caught exception while consuming events", e);
         throw e;
       }
     }
@@ -128,22 +128,22 @@ public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
   }
 
   private void updateOffsets(int partition, long offset) {
-    consumerOffsets.put(partition, offset + 1);
+    _consumerOffsets.put(partition, offset + 1);
   }
 
   @Override
   public void commit() {
-    consumer.commitSync(getOffsetsMap());
+    _consumer.commitSync(getOffsetsMap());
     // Since the latest batch may not be fully consumed, we need to reset the Kafka consumer's offsets.
     resetOffsets();
-    consumerOffsets.clear();
+    _consumerOffsets.clear();
   }
 
   private Map<TopicPartition, OffsetAndMetadata> getOffsetsMap() {
     Map<TopicPartition, OffsetAndMetadata> offsetsMap = new HashMap<>();
-    for (Integer partition : consumerOffsets.keySet()) {
+    for (Integer partition : _consumerOffsets.keySet()) {
       offsetsMap.put(new TopicPartition(_streamConfig.getTopicName(), partition),
-          new OffsetAndMetadata(consumerOffsets.get(partition)));
+          new OffsetAndMetadata(_consumerOffsets.get(partition)));
     }
     return offsetsMap;
   }
@@ -151,11 +151,11 @@ public class KafkaStreamLevelConsumer implements StreamLevelConsumer {
   @Override
   public void shutdown()
       throws Exception {
-    if (consumer != null) {
+    if (_consumer != null) {
       // If the offset commit did not succeed, reset the offsets here.
       resetOffsets();
-      KafkaStreamLevelConsumerManager.releaseKafkaConsumer(consumer);
-      consumer = null;
+      KafkaStreamLevelConsumerManager.releaseKafkaConsumer(_consumer);
+      _consumer = null;
     }
   }
 }
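
The Kafka 2.0 stream-level consumer above keeps a per-partition map of the next offset to consume (last consumed offset plus one), commits it with commitSync, and then seeks back to the committed positions because the most recent poll may have returned records that were never handed out. A condensed sketch of that bookkeeping against the public kafka-clients API; the class and field names here are illustrative:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;

    public class OffsetTracker {
      private final KafkaConsumer<byte[], byte[]> _consumer;
      private final String _topic;
      // Next offset to consume, per partition (last consumed offset + 1).
      private final Map<Integer, Long> _consumerOffsets = new HashMap<>();

      public OffsetTracker(KafkaConsumer<byte[], byte[]> consumer, String topic) {
        _consumer = consumer;
        _topic = topic;
      }

      public void onRecordConsumed(int partition, long offset) {
        _consumerOffsets.put(partition, offset + 1);
      }

      public void commit() {
        Map<TopicPartition, OffsetAndMetadata> offsetsMap = new HashMap<>();
        for (Map.Entry<Integer, Long> entry : _consumerOffsets.entrySet()) {
          offsetsMap.put(new TopicPartition(_topic, entry.getKey()), new OffsetAndMetadata(entry.getValue()));
        }
        _consumer.commitSync(offsetsMap);
        // The last poll() may have fetched records that were never consumed,
        // so rewind each partition to its committed position before the next poll.
        for (Map.Entry<Integer, Long> entry : _consumerOffsets.entrySet()) {
          _consumer.seek(new TopicPartition(_topic, entry.getKey()), entry.getValue());
        }
        _consumerOffsets.clear();
      }
    }
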
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumerManager.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumerManager.java
index 2f99984..8238959 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumerManager.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelConsumerManager.java
@@ -61,6 +61,8 @@ import org.slf4j.LoggerFactory;
  * Kafka APIs.
  */
 public class KafkaStreamLevelConsumerManager {
+  private KafkaStreamLevelConsumerManager() {
+  }
 
   private static final Logger LOGGER = LoggerFactory.getLogger(KafkaStreamLevelConsumerManager.class);
   private static final Long IN_USE = -1L;
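
The hunk above gives KafkaStreamLevelConsumerManager a private no-argument constructor. The class only exposes static members, so without it the compiler generates a public constructor that serves no purpose; checkstyle checks such as HideUtilityClassConstructor commonly flag that. A minimal illustration with a hypothetical utility class:

    public final class SegmentNameUtils {
      // Prevents instantiation of a static-only utility class.
      private SegmentNameUtils() {
      }

      public static String join(String tableName, String topicName) {
        return tableName + "-" + topicName;
      }
    }
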
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelStreamConfig.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelStreamConfig.java
index b3efef1..eb08ddc 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelStreamConfig.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/KafkaStreamLevelStreamConfig.java
@@ -34,8 +34,10 @@ import org.apache.pinot.spi.utils.EqualityUtils;
  */
 public class KafkaStreamLevelStreamConfig {
   private static final String DEFAULT_AUTO_COMMIT_ENABLE = "false";
+  private static final Map<String, String> DEFAULT_PROPS = new HashMap<String, String>() {{
+    put(KafkaStreamConfigProperties.HighLevelConsumer.AUTO_COMMIT_ENABLE, DEFAULT_AUTO_COMMIT_ENABLE);
+  }};
 
-  private static final Map<String, String> defaultProps;
   private String _kafkaTopicName;
   private String _groupId;
   private String _bootstrapServers;
@@ -47,25 +49,21 @@ public class KafkaStreamLevelStreamConfig {
    * @param tableName
    * @param groupId
    */
-  public KafkaStreamLevelStreamConfig(StreamConfig streamConfig, String tableName,
-      String groupId) {
+  public KafkaStreamLevelStreamConfig(StreamConfig streamConfig, String tableName, String groupId) {
     Map<String, String> streamConfigMap = streamConfig.getStreamConfigsMap();
 
     _kafkaTopicName = streamConfig.getTopicName();
-    String hlcBootstrapBrokerUrlKey = KafkaStreamConfigProperties
-        .constructStreamProperty(KafkaStreamConfigProperties.HighLevelConsumer.KAFKA_HLC_BOOTSTRAP_SERVER);
+    String hlcBootstrapBrokerUrlKey =
+        KafkaStreamConfigProperties.constructStreamProperty(KafkaStreamConfigProperties.HighLevelConsumer.KAFKA_HLC_BOOTSTRAP_SERVER);
     _bootstrapServers = streamConfigMap.get(hlcBootstrapBrokerUrlKey);
-    Preconditions.checkNotNull(_bootstrapServers,
-        "Must specify bootstrap broker connect string " + hlcBootstrapBrokerUrlKey + " in high level kafka consumer");
+    Preconditions.checkNotNull(_bootstrapServers, "Must specify bootstrap broker connect string " + hlcBootstrapBrokerUrlKey + " in high level kafka consumer");
     _groupId = groupId;
 
     _kafkaConsumerProperties = new HashMap<>();
-    String kafkaConsumerPropertyPrefix =
-        KafkaStreamConfigProperties.constructStreamProperty(KafkaStreamConfigProperties.KAFKA_CONSUMER_PROP_PREFIX);
+    String kafkaConsumerPropertyPrefix = KafkaStreamConfigProperties.constructStreamProperty(KafkaStreamConfigProperties.KAFKA_CONSUMER_PROP_PREFIX);
     for (String key : streamConfigMap.keySet()) {
       if (key.startsWith(kafkaConsumerPropertyPrefix)) {
-        _kafkaConsumerProperties
-            .put(StreamConfigProperties.getPropertySuffix(key, kafkaConsumerPropertyPrefix), streamConfigMap.get(key));
+        _kafkaConsumerProperties.put(StreamConfigProperties.getPropertySuffix(key, kafkaConsumerPropertyPrefix), streamConfigMap.get(key));
       }
     }
   }
@@ -80,8 +78,8 @@ public class KafkaStreamLevelStreamConfig {
 
   public Properties getKafkaConsumerProperties() {
     Properties props = new Properties();
-    for (String key : defaultProps.keySet()) {
-      props.put(key, defaultProps.get(key));
+    for (String key : DEFAULT_PROPS.keySet()) {
+      props.put(key, DEFAULT_PROPS.get(key));
     }
     for (String key : _kafkaConsumerProperties.keySet()) {
       props.put(key, _kafkaConsumerProperties.get(key));
@@ -93,9 +91,8 @@ public class KafkaStreamLevelStreamConfig {
 
   @Override
   public String toString() {
-    return "KafkaStreamLevelStreamConfig{" + "_kafkaTopicName='" + _kafkaTopicName + '\'' + ", _groupId='" + _groupId
-        + '\'' + ", _bootstrapServers='" + _bootstrapServers + '\'' + ", _kafkaConsumerProperties="
-        + _kafkaConsumerProperties + '}';
+    return "KafkaStreamLevelStreamConfig{" + "_kafkaTopicName='" + _kafkaTopicName + '\'' + ", _groupId='" + _groupId + '\'' + ", _bootstrapServers='"
+        + _bootstrapServers + '\'' + ", _kafkaConsumerProperties=" + _kafkaConsumerProperties + '}';
   }
 
   @Override
@@ -110,9 +107,8 @@ public class KafkaStreamLevelStreamConfig {
 
     KafkaStreamLevelStreamConfig that = (KafkaStreamLevelStreamConfig) o;
 
-    return EqualityUtils.isEqual(_kafkaTopicName, that._kafkaTopicName) && EqualityUtils
-        .isEqual(_groupId, that._groupId) && EqualityUtils.isEqual(_bootstrapServers, that._bootstrapServers)
-        && EqualityUtils.isEqual(_kafkaConsumerProperties, that._kafkaConsumerProperties);
+    return EqualityUtils.isEqual(_kafkaTopicName, that._kafkaTopicName) && EqualityUtils.isEqual(_groupId, that._groupId) && EqualityUtils
+        .isEqual(_bootstrapServers, that._bootstrapServers) && EqualityUtils.isEqual(_kafkaConsumerProperties, that._kafkaConsumerProperties);
   }
 
   @Override
@@ -127,9 +123,4 @@ public class KafkaStreamLevelStreamConfig {
   public String getBootstrapServers() {
     return _bootstrapServers;
   }
-
-  static {
-    defaultProps = new HashMap<>();
-    defaultProps.put(KafkaStreamConfigProperties.HighLevelConsumer.AUTO_COMMIT_ENABLE, DEFAULT_AUTO_COMMIT_ENABLE);
-  }
 }
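
Here the trailing static block is replaced with double-brace initialization of DEFAULT_PROPS. Double-brace initialization is compact but compiles to an anonymous HashMap subclass per constant; for a single entry, Collections.singletonMap gives the same result without the extra class. A side-by-side sketch of the two styles (the key and value below are illustrative):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class DefaultPropsStyles {
      // Double-brace initialization: concise, but defines an anonymous HashMap subclass.
      private static final Map<String, String> VIA_DOUBLE_BRACE = new HashMap<String, String>() {{
        put("auto.commit.enable", "false");
      }};

      // Equivalent single-entry constant, immutable and with no extra class.
      private static final Map<String, String> VIA_SINGLETON_MAP =
          Collections.singletonMap("auto.commit.enable", "false");
    }
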
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataProducer.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataProducer.java
index 497abea..ed019d4 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataProducer.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataProducer.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
 public class KafkaDataProducer implements StreamDataProducer {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(KafkaDataProducer.class);
-  private Producer<byte[], byte[]> producer;
+  private Producer<byte[], byte[]> _producer;
 
   @Override
   public void init(Properties props) {
@@ -50,7 +50,7 @@ public class KafkaDataProducer implements StreamDataProducer {
     props.remove("request.required.acks");
     props.remove("serializer.class");
     try {
-      this.producer = new KafkaProducer<>(props);
+      _producer = new KafkaProducer<>(props);
     } catch (Exception e) {
       LOGGER.error("Failed to create a Kafka 2 Producer.", e);
     }
@@ -59,19 +59,19 @@ public class KafkaDataProducer implements StreamDataProducer {
   @Override
   public void produce(String topic, byte[] payload) {
     ProducerRecord<byte[], byte[]> record = new ProducerRecord(topic, payload);
-    producer.send(record);
-    producer.flush();
+    _producer.send(record);
+    _producer.flush();
   }
 
   @Override
   public void produce(String topic, byte[] key, byte[] payload) {
     ProducerRecord<byte[], byte[]> record = new ProducerRecord(topic, key, payload);
-    producer.send(record);
-    producer.flush();
+    _producer.send(record);
+    _producer.flush();
   }
 
   @Override
   public void close() {
-    producer.close();
+    _producer.close();
   }
 }
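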
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataServerStartable.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataServerStartable.java
index 2a4e623..f74bc01 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataServerStartable.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-2.0/src/main/java/org/apache/pinot/plugin/stream/kafka20/server/KafkaDataServerStartable.java
@@ -45,59 +45,59 @@ public class KafkaDataServerStartable implements StreamDataServerStartable {
   private static final String LOG_DIRS = "log.dirs";
   private static final String PORT = "port";
 
-  private KafkaServerStartable serverStartable;
-  private int port;
-  private String zkStr;
-  private String logDirPath;
-  private AdminClient adminClient;
+  private KafkaServerStartable _serverStartable;
+  private int _port;
+  private String _zkStr;
+  private String _logDirPath;
+  private AdminClient _adminClient;
 
   public void init(Properties props) {
-    port = (int) props.get(PORT);
-    zkStr = props.getProperty(ZOOKEEPER_CONNECT);
-    logDirPath = props.getProperty(LOG_DIRS);
+    _port = (int) props.get(PORT);
+    _zkStr = props.getProperty(ZOOKEEPER_CONNECT);
+    _logDirPath = props.getProperty(LOG_DIRS);
 
     // Create the ZK nodes for Kafka, if needed
-    int indexOfFirstSlash = zkStr.indexOf('/');
+    int indexOfFirstSlash = _zkStr.indexOf('/');
     if (indexOfFirstSlash != -1) {
-      String bareZkUrl = zkStr.substring(0, indexOfFirstSlash);
-      String zkNodePath = zkStr.substring(indexOfFirstSlash);
+      String bareZkUrl = _zkStr.substring(0, indexOfFirstSlash);
+      String zkNodePath = _zkStr.substring(indexOfFirstSlash);
       ZkClient client = new ZkClient(bareZkUrl);
       client.createPersistent(zkNodePath, true);
       client.close();
     }
 
-    File logDir = new File(logDirPath);
+    File logDir = new File(_logDirPath);
     logDir.mkdirs();
 
     props.put("zookeeper.session.timeout.ms", "60000");
-    serverStartable = new KafkaServerStartable(new KafkaConfig(props));
+    _serverStartable = new KafkaServerStartable(new KafkaConfig(props));
     final Map<String, Object> config = new HashMap<>();
-    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + port);
+    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + _port);
     config.put(AdminClientConfig.CLIENT_ID_CONFIG, "Kafka2AdminClient-" + UUID.randomUUID().toString());
     config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, 15000);
-    adminClient = KafkaAdminClient.create(config);
+    _adminClient = KafkaAdminClient.create(config);
   }
 
   @Override
   public void start() {
-    serverStartable.startup();
+    _serverStartable.startup();
   }
 
   @Override
   public void stop() {
-    serverStartable.shutdown();
-    FileUtils.deleteQuietly(new File(serverStartable.staticServerConfig().logDirs().apply(0)));
+    _serverStartable.shutdown();
+    FileUtils.deleteQuietly(new File(_serverStartable.staticServerConfig().logDirs().apply(0)));
   }
 
   @Override
   public void createTopic(String topic, Properties props) {
     int partition = (Integer) props.get("partition");
     Collection<NewTopic> topicList = Arrays.asList(new NewTopic(topic, partition, (short) 1));
-    adminClient.createTopics(topicList);
+    _adminClient.createTopics(topicList);
   }
 
   @Override
   public int getPort() {
-    return port;
+    return _port;
   }
 }
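
KafkaDataServerStartable wires its test broker through the standard kafka-clients admin API: an AdminClient built from a bootstrap address and one NewTopic per requested topic. The helper above does not block on the creation result; the stand-alone sketch below does, and its broker address and topic name are placeholders:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewTopic;

    public class TopicCreator {
      public static void main(String[] args)
          throws Exception {
        Map<String, Object> config = new HashMap<>();
        config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder broker
        try (AdminClient adminClient = AdminClient.create(config)) {
          // One partition, replication factor 1, which is enough for a local test broker.
          NewTopic topic = new NewTopic("test-topic", 1, (short) 1);
          adminClient.createTopics(Collections.singletonList(topic)).all().get();
        }
      }
    }
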
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/pom.xml b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/pom.xml
index 9a229b5..632390a 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/pom.xml
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/pom.xml
@@ -36,9 +36,6 @@
   <properties>
     <pinot.root>${basedir}/../../..</pinot.root>
     <phase.prop>package</phase.prop>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
   <dependencies>
     <dependency>
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/main/java/org/apache/pinot/plugin/stream/kafka/KafkaStreamConfigProperties.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/main/java/org/apache/pinot/plugin/stream/kafka/KafkaStreamConfigProperties.java
index ca2817b..477bbf5 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/main/java/org/apache/pinot/plugin/stream/kafka/KafkaStreamConfigProperties.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/main/java/org/apache/pinot/plugin/stream/kafka/KafkaStreamConfigProperties.java
@@ -26,6 +26,9 @@ import org.apache.pinot.spi.stream.StreamConfigProperties;
  * Property key definitions for all kafka stream related properties
  */
 public class KafkaStreamConfigProperties {
+  private KafkaStreamConfigProperties() {
+  }
+
   public static final String DOT_SEPARATOR = ".";
   public static final String STREAM_TYPE = "kafka";
 
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/test/java/org/apache/pinot/plugin/stream/kafka/KafkaJSONMessageDecoderTest.java b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/test/java/org/apache/pinot/plugin/stream/kafka/KafkaJSONMessageDecoderTest.java
index 48bd405..52ae75f 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/test/java/org/apache/pinot/plugin/stream/kafka/KafkaJSONMessageDecoderTest.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kafka-base/src/test/java/org/apache/pinot/plugin/stream/kafka/KafkaJSONMessageDecoderTest.java
@@ -34,7 +34,7 @@ import org.testng.annotations.Test;
 
 public class KafkaJSONMessageDecoderTest {
 
-  private static ObjectMapper objectMapper = new ObjectMapper();
+  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
 
   @Test
   public void testJsonDecoderWithoutOutgoingTimeSpec()
@@ -85,7 +85,7 @@ public class KafkaJSONMessageDecoderTest {
       GenericRow r = new GenericRow();
       String line = reader.readLine();
       while (line != null) {
-        JsonNode jsonNode = objectMapper.reader().readTree(line);
+        JsonNode jsonNode = OBJECT_MAPPER.reader().readTree(line);
         decoder.decode(line.getBytes(), r);
         for (String field : sourceFields.keySet()) {
           Object actualValue = r.getValue(field);
@@ -108,6 +108,7 @@ public class KafkaJSONMessageDecoderTest {
               break;
             default:
               Assert.assertTrue(false, "Shouldn't arrive here.");
+              break;
           }
         }
         line = reader.readLine();
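
The test above promotes its ObjectMapper to a static final OBJECT_MAPPER and terminates the switch default with a break. Sharing one mapper is the usual Jackson pattern, since an ObjectMapper is thread-safe once configured and relatively expensive to construct. A small hypothetical example of the shared-mapper style:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class JsonParsingExample {
      // One shared, effectively immutable mapper; constants use UPPER_SNAKE_CASE.
      private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

      public static long readId(String line)
          throws Exception {
        JsonNode jsonNode = OBJECT_MAPPER.readTree(line);
        return jsonNode.get("id").asLong();
      }

      public static void main(String[] args)
          throws Exception {
        System.out.println(readId("{\"id\": 42}"));  // prints 42
      }
    }
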
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/pom.xml b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/pom.xml
index c061730..2c285b2 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/pom.xml
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/pom.xml
@@ -39,9 +39,6 @@
     <aws.version>2.14.28</aws.version>
     <easymock.version>4.2</easymock.version>
     <reactive.version>1.0.2</reactive.version>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencyManagement>
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisConfig.java b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisConfig.java
index 73e5a99..fa344aa 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisConfig.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisConfig.java
@@ -86,5 +86,7 @@ public class KinesisConfig {
     return _secretKey;
   }
 
-  public String getEndpoint() { return _endpoint; }
+  public String getEndpoint() {
+    return _endpoint;
+  }
 }
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisPartitionGroupOffset.java b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisPartitionGroupOffset.java
index 1ac7770..1af2738 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisPartitionGroupOffset.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisPartitionGroupOffset.java
@@ -79,14 +79,14 @@ public class KinesisPartitionGroupOffset implements StreamPartitionMsgOffset {
     Preconditions.checkNotNull(o);
     KinesisPartitionGroupOffset other = (KinesisPartitionGroupOffset) o;
     Preconditions.checkNotNull(other._shardToStartSequenceMap);
-    Preconditions.checkNotNull(this._shardToStartSequenceMap);
+    Preconditions.checkNotNull(_shardToStartSequenceMap);
     Preconditions
         .checkState(other._shardToStartSequenceMap.size() == 1, "Only 1 shard per consumer supported. Found: %s",
             other._shardToStartSequenceMap);
     Preconditions
-        .checkState(this._shardToStartSequenceMap.size() == 1, "Only 1 shard per consumer supported. Found: %s",
-            this._shardToStartSequenceMap);
-    return this._shardToStartSequenceMap.values().iterator().next()
+        .checkState(_shardToStartSequenceMap.size() == 1, "Only 1 shard per consumer supported. Found: %s",
+            _shardToStartSequenceMap);
+    return _shardToStartSequenceMap.values().iterator().next()
         .compareTo(other._shardToStartSequenceMap.values().iterator().next());
   }
 }
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisConsumerTest.java b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisConsumerTest.java
index ba82502..c006e1a 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisConsumerTest.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisConsumerTest.java
@@ -50,17 +50,17 @@ public class KinesisConsumerTest {
   private static final String TABLE_NAME_WITH_TYPE = "kinesisTest_REALTIME";
   private static final String STREAM_NAME = "kinesis-test";
   private static final String AWS_REGION = "us-west-2";
-  public static final int TIMEOUT = 1000;
-  public static final int NUM_RECORDS = 10;
-  public static final String DUMMY_RECORD_PREFIX = "DUMMY_RECORD-";
-  public static final String PARTITION_KEY_PREFIX = "PARTITION_KEY-";
-  public static final String PLACEHOLDER = "DUMMY";
-  public static final int MAX_RECORDS_TO_FETCH = 20;
-
-  private static KinesisConnectionHandler kinesisConnectionHandler;
-  private static StreamConsumerFactory streamConsumerFactory;
-  private static KinesisClient kinesisClient;
-  private List<Record> recordList;
+  private static final int TIMEOUT = 1000;
+  private static final int NUM_RECORDS = 10;
+  private static final String DUMMY_RECORD_PREFIX = "DUMMY_RECORD-";
+  private static final String PARTITION_KEY_PREFIX = "PARTITION_KEY-";
+  private static final String PLACEHOLDER = "DUMMY";
+  private static final int MAX_RECORDS_TO_FETCH = 20;
+
+  private KinesisConnectionHandler _kinesisConnectionHandler;
+  private StreamConsumerFactory _streamConsumerFactory;
+  private KinesisClient _kinesisClient;
+  private List<Record> _recordList;
 
   private KinesisConfig getKinesisConfig() {
     Map<String, String> props = new HashMap<>();
@@ -82,17 +82,17 @@ public class KinesisConsumerTest {
 
   @BeforeMethod
   public void setupTest() {
-    kinesisConnectionHandler = createMock(KinesisConnectionHandler.class);
-    kinesisClient = createMock(KinesisClient.class);
-    streamConsumerFactory = createMock(StreamConsumerFactory.class);
+    _kinesisConnectionHandler = createMock(KinesisConnectionHandler.class);
+    _kinesisClient = createMock(KinesisClient.class);
+    _streamConsumerFactory = createMock(StreamConsumerFactory.class);
 
-    recordList = new ArrayList<>();
+    _recordList = new ArrayList<>();
 
     for (int i = 0; i < NUM_RECORDS; i++) {
       Record record =
           Record.builder().data(SdkBytes.fromUtf8String(DUMMY_RECORD_PREFIX + i)).partitionKey(PARTITION_KEY_PREFIX + i)
               .sequenceNumber(String.valueOf(i + 1)).build();
-      recordList.add(record);
+      _recordList.add(record);
     }
   }
 
@@ -102,17 +102,17 @@ public class KinesisConsumerTest {
     Capture<GetShardIteratorRequest> getShardIteratorRequestCapture = Capture.newInstance();
 
     GetRecordsResponse getRecordsResponse =
-        GetRecordsResponse.builder().nextShardIterator(null).records(recordList).build();
+        GetRecordsResponse.builder().nextShardIterator(null).records(_recordList).build();
     GetShardIteratorResponse getShardIteratorResponse =
         GetShardIteratorResponse.builder().shardIterator(PLACEHOLDER).build();
 
-    expect(kinesisClient.getRecords(capture(getRecordsRequestCapture))).andReturn(getRecordsResponse).anyTimes();
-    expect(kinesisClient.getShardIterator(capture(getShardIteratorRequestCapture))).andReturn(getShardIteratorResponse)
+    expect(_kinesisClient.getRecords(capture(getRecordsRequestCapture))).andReturn(getRecordsResponse).anyTimes();
+    expect(_kinesisClient.getShardIterator(capture(getShardIteratorRequestCapture))).andReturn(getShardIteratorResponse)
         .anyTimes();
 
-    replay(kinesisClient);
+    replay(_kinesisClient);
 
-    KinesisConsumer kinesisConsumer = new KinesisConsumer(getKinesisConfig(), kinesisClient);
+    KinesisConsumer kinesisConsumer = new KinesisConsumer(getKinesisConfig(), _kinesisClient);
 
     Map<String, String> shardToSequenceMap = new HashMap<>();
     shardToSequenceMap.put("0", "1");
@@ -134,18 +134,18 @@ public class KinesisConsumerTest {
     Capture<GetShardIteratorRequest> getShardIteratorRequestCapture = Capture.newInstance();
 
     GetRecordsResponse getRecordsResponse =
-        GetRecordsResponse.builder().nextShardIterator(PLACEHOLDER).records(recordList).build();
+        GetRecordsResponse.builder().nextShardIterator(PLACEHOLDER).records(_recordList).build();
     GetShardIteratorResponse getShardIteratorResponse =
         GetShardIteratorResponse.builder().shardIterator(PLACEHOLDER).build();
 
-    expect(kinesisClient.getRecords(capture(getRecordsRequestCapture))).andReturn(getRecordsResponse).anyTimes();
-    expect(kinesisClient.getShardIterator(capture(getShardIteratorRequestCapture))).andReturn(getShardIteratorResponse)
+    expect(_kinesisClient.getRecords(capture(getRecordsRequestCapture))).andReturn(getRecordsResponse).anyTimes();
+    expect(_kinesisClient.getShardIterator(capture(getShardIteratorRequestCapture))).andReturn(getShardIteratorResponse)
         .anyTimes();
 
-    replay(kinesisClient);
+    replay(_kinesisClient);
 
     KinesisConfig kinesisConfig = getKinesisConfig();
-    KinesisConsumer kinesisConsumer = new KinesisConsumer(kinesisConfig, kinesisClient);
+    KinesisConsumer kinesisConsumer = new KinesisConsumer(kinesisConfig, _kinesisClient);
 
     Map<String, String> shardToSequenceMap = new HashMap<>();
     shardToSequenceMap.put("0", "1");
@@ -169,18 +169,18 @@ public class KinesisConsumerTest {
     Capture<GetShardIteratorRequest> getShardIteratorRequestCapture = Capture.newInstance();
 
     GetRecordsResponse getRecordsResponse =
-        GetRecordsResponse.builder().nextShardIterator(null).records(recordList).childShards(shardList).build();
+        GetRecordsResponse.builder().nextShardIterator(null).records(_recordList).childShards(shardList).build();
     GetShardIteratorResponse getShardIteratorResponse =
         GetShardIteratorResponse.builder().shardIterator(PLACEHOLDER).build();
 
-    expect(kinesisClient.getRecords(capture(getRecordsRequestCapture))).andReturn(getRecordsResponse).anyTimes();
-    expect(kinesisClient.getShardIterator(capture(getShardIteratorRequestCapture))).andReturn(getShardIteratorResponse)
+    expect(_kinesisClient.getRecords(capture(getRecordsRequestCapture))).andReturn(getRecordsResponse).anyTimes();
+    expect(_kinesisClient.getShardIterator(capture(getShardIteratorRequestCapture))).andReturn(getShardIteratorResponse)
         .anyTimes();
 
-    replay(kinesisClient);
+    replay(_kinesisClient);
 
     KinesisConfig kinesisConfig = getKinesisConfig();
-    KinesisConsumer kinesisConsumer = new KinesisConsumer(kinesisConfig, kinesisClient);
+    KinesisConsumer kinesisConsumer = new KinesisConsumer(kinesisConfig, _kinesisClient);
 
     Map<String, String> shardToSequenceMap = new HashMap<>();
     shardToSequenceMap.put("0", "1");
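
The Kinesis tests follow the usual EasyMock cycle: create the mock, record expectations with expect(...).andReturn(...), switch to replay mode, exercise the code under test, and capture arguments for later assertions. A minimal self-contained sketch of that cycle; the Service interface below is invented for illustration and stands in for mocks like KinesisClient:

    import static org.easymock.EasyMock.capture;
    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    import org.easymock.Capture;

    public class EasyMockCycleExample {
      // Hypothetical collaborator to be mocked.
      public interface Service {
        String fetch(String key);
      }

      public static void main(String[] args) {
        Service service = createMock(Service.class);
        Capture<String> keyCapture = Capture.newInstance();

        // Record phase: any fetch() call returns a canned value and captures its argument.
        expect(service.fetch(capture(keyCapture))).andReturn("canned-response").anyTimes();
        replay(service);

        // Exercise phase: the code under test would call the mock here.
        String result = service.fetch("some-key");

        verify(service);
        System.out.println(result + " / captured key: " + keyCapture.getValue());
      }
    }
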
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisStreamMetadataProviderTest.java b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisStreamMetadataProviderTest.java
index 5ae5b35..2425f2a 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisStreamMetadataProviderTest.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/test/java/org/apache/pinot/plugin/stream/kinesis/KinesisStreamMetadataProviderTest.java
@@ -47,13 +47,13 @@ public class KinesisStreamMetadataProviderTest {
   private static final String AWS_REGION = "us-west-2";
   private static final String SHARD_ID_0 = "0";
   private static final String SHARD_ID_1 = "1";
-  public static final String CLIENT_ID = "dummy";
-  public static final int TIMEOUT = 1000;
+  private static final String CLIENT_ID = "dummy";
+  private static final int TIMEOUT = 1000;
 
-  private static KinesisConnectionHandler kinesisConnectionHandler;
-  private KinesisStreamMetadataProvider kinesisStreamMetadataProvider;
-  private static StreamConsumerFactory streamConsumerFactory;
-  private static PartitionGroupConsumer partitionGroupConsumer;
+  private KinesisConnectionHandler _kinesisConnectionHandler;
+  private KinesisStreamMetadataProvider _kinesisStreamMetadataProvider;
+  private StreamConsumerFactory _streamConsumerFactory;
+  private PartitionGroupConsumer _partitionGroupConsumer;
 
   private StreamConfig getStreamConfig() {
     Map<String, String> props = new HashMap<>();
@@ -71,12 +71,12 @@ public class KinesisStreamMetadataProviderTest {
 
   @BeforeMethod
   public void setupTest() {
-    kinesisConnectionHandler = createMock(KinesisConnectionHandler.class);
-    streamConsumerFactory = createMock(StreamConsumerFactory.class);
-    partitionGroupConsumer = createNiceMock(PartitionGroupConsumer.class);
-    kinesisStreamMetadataProvider =
-        new KinesisStreamMetadataProvider(CLIENT_ID, getStreamConfig(), kinesisConnectionHandler,
-            streamConsumerFactory);
+    _kinesisConnectionHandler = createMock(KinesisConnectionHandler.class);
+    _streamConsumerFactory = createMock(StreamConsumerFactory.class);
+    _partitionGroupConsumer = createNiceMock(PartitionGroupConsumer.class);
+    _kinesisStreamMetadataProvider =
+        new KinesisStreamMetadataProvider(CLIENT_ID, getStreamConfig(), _kinesisConnectionHandler,
+            _streamConsumerFactory);
   }
 
   @Test
@@ -85,10 +85,10 @@ public class KinesisStreamMetadataProviderTest {
     Shard shard0 = Shard.builder().shardId(SHARD_ID_0).sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
     Shard shard1 = Shard.builder().shardId(SHARD_ID_1).sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
 
-    expect(kinesisConnectionHandler.getShards()).andReturn(ImmutableList.of(shard0, shard1)).anyTimes();
-    replay(kinesisConnectionHandler);
+    expect(_kinesisConnectionHandler.getShards()).andReturn(ImmutableList.of(shard0, shard1)).anyTimes();
+    replay(_kinesisConnectionHandler);
 
-    List<PartitionGroupMetadata> result = kinesisStreamMetadataProvider
+    List<PartitionGroupMetadata> result = _kinesisStreamMetadataProvider
         .computePartitionGroupMetadata(CLIENT_ID, getStreamConfig(), new ArrayList<>(), TIMEOUT);
 
 
@@ -114,19 +114,21 @@ public class KinesisStreamMetadataProviderTest {
     Capture<Integer> intArguments = newCapture(CaptureType.ALL);
     Capture<String> stringCapture = newCapture(CaptureType.ALL);
 
-    Shard shard0 = Shard.builder().shardId(SHARD_ID_0).sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").endingSequenceNumber("1").build()).build();
-    Shard shard1 = Shard.builder().shardId(SHARD_ID_1).sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
-    expect(kinesisConnectionHandler.getShards()).andReturn(ImmutableList.of(shard0, shard1)).anyTimes();
-    expect(streamConsumerFactory
+    Shard shard0 = Shard.builder().shardId(SHARD_ID_0).sequenceNumberRange(
+        SequenceNumberRange.builder().startingSequenceNumber("1").endingSequenceNumber("1").build()).build();
+    Shard shard1 = Shard.builder().shardId(SHARD_ID_1).sequenceNumberRange(
+        SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
+    expect(_kinesisConnectionHandler.getShards()).andReturn(ImmutableList.of(shard0, shard1)).anyTimes();
+    expect(_streamConsumerFactory
         .createPartitionGroupConsumer(capture(stringCapture), capture(partitionGroupMetadataCapture)))
-        .andReturn(partitionGroupConsumer).anyTimes();
-    expect(partitionGroupConsumer
+        .andReturn(_partitionGroupConsumer).anyTimes();
+    expect(_partitionGroupConsumer
         .fetchMessages(capture(checkpointArgs), capture(checkpointArgs), captureInt(intArguments)))
         .andReturn(new KinesisRecordsBatch(new ArrayList<>(), "0", true)).anyTimes();
 
-    replay(kinesisConnectionHandler, streamConsumerFactory, partitionGroupConsumer);
+    replay(_kinesisConnectionHandler, _streamConsumerFactory, _partitionGroupConsumer);
 
-    List<PartitionGroupMetadata> result = kinesisStreamMetadataProvider
+    List<PartitionGroupMetadata> result = _kinesisStreamMetadataProvider
         .computePartitionGroupMetadata(CLIENT_ID, getStreamConfig(), currentPartitionGroupMeta, TIMEOUT);
 
     Assert.assertEquals(result.size(), 1);
@@ -150,20 +152,22 @@ public class KinesisStreamMetadataProviderTest {
     Capture<Integer> intArguments = newCapture(CaptureType.ALL);
     Capture<String> stringCapture = newCapture(CaptureType.ALL);
 
-    Shard shard0 = Shard.builder().shardId(SHARD_ID_0).parentShardId(SHARD_ID_1).sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
-    Shard shard1 = Shard.builder().shardId(SHARD_ID_1).sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").endingSequenceNumber("1").build()).build();
+    Shard shard0 = Shard.builder().shardId(SHARD_ID_0).parentShardId(SHARD_ID_1).sequenceNumberRange(
+        SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
+    Shard shard1 = Shard.builder().shardId(SHARD_ID_1).sequenceNumberRange(
+        SequenceNumberRange.builder().startingSequenceNumber("1").endingSequenceNumber("1").build()).build();
 
-    expect(kinesisConnectionHandler.getShards()).andReturn(ImmutableList.of(shard0, shard1)).anyTimes();
-    expect(streamConsumerFactory
+    expect(_kinesisConnectionHandler.getShards()).andReturn(ImmutableList.of(shard0, shard1)).anyTimes();
+    expect(_streamConsumerFactory
         .createPartitionGroupConsumer(capture(stringCapture), capture(partitionGroupMetadataCapture)))
-        .andReturn(partitionGroupConsumer).anyTimes();
-    expect(partitionGroupConsumer
+        .andReturn(_partitionGroupConsumer).anyTimes();
+    expect(_partitionGroupConsumer
         .fetchMessages(capture(checkpointArgs), capture(checkpointArgs), captureInt(intArguments)))
         .andReturn(new KinesisRecordsBatch(new ArrayList<>(), "0", true)).anyTimes();
 
-    replay(kinesisConnectionHandler, streamConsumerFactory, partitionGroupConsumer);
+    replay(_kinesisConnectionHandler, _streamConsumerFactory, _partitionGroupConsumer);
 
-    List<PartitionGroupMetadata> result = kinesisStreamMetadataProvider
+    List<PartitionGroupMetadata> result = _kinesisStreamMetadataProvider
         .computePartitionGroupMetadata(CLIENT_ID, getStreamConfig(), currentPartitionGroupMeta, TIMEOUT);
 
     Assert.assertEquals(result.size(), 1);
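
The Kinesis test changes above follow one pattern: mock fields become underscore-prefixed, non-static instance members, and each test drives EasyMock's expect/replay cycle against those members. Below is a minimal, self-contained sketch of that pattern; it is not part of the commit, and the WidgetStore interface and its fetch method are invented for illustration.

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

public class WidgetStoreTest {
  // Hypothetical collaborator, used only for this sketch
  public interface WidgetStore {
    String fetch(String id);
  }

  // Instance field with an underscore prefix, matching the code style enforced by this commit
  private WidgetStore _widgetStore;

  @BeforeMethod
  public void setUp() {
    _widgetStore = createMock(WidgetStore.class);
  }

  @Test
  public void testFetch() {
    expect(_widgetStore.fetch("0")).andReturn("value").anyTimes();
    replay(_widgetStore);

    Assert.assertEquals(_widgetStore.fetch("0"), "value");
    verify(_widgetStore);
  }
}
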
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/pom.xml b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/pom.xml
index 6eb8947..fd7128e 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/pom.xml
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/pom.xml
@@ -56,9 +56,6 @@
     <grpc-protobuf-lite.version>1.19.0</grpc-protobuf-lite.version>
     <swagger-annotations.version>1.5.21</swagger-annotations.version>
     <okio.version>1.6.0</okio.version>
-
-    <!-- TODO: delete this prop once all the checkstyle warnings are fixed -->
-    <checkstyle.fail.on.violation>false</checkstyle.fail.on.violation>
   </properties>
 
   <dependencies>
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/MessageIdStreamOffset.java b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/MessageIdStreamOffset.java
index 761e594..45cfca0 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/MessageIdStreamOffset.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/MessageIdStreamOffset.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
  * {@link StreamPartitionMsgOffset} implementation for Pulsar {@link MessageId}
  */
 public class MessageIdStreamOffset implements StreamPartitionMsgOffset {
-  private Logger LOGGER = LoggerFactory.getLogger(MessageIdStreamOffset.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(MessageIdStreamOffset.class);
   private MessageId _messageId;
 
   public MessageIdStreamOffset(MessageId messageId) {
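
The logger fixes in these Pulsar files move toward the usual SLF4J convention: a per-class logger becomes a static final constant, while a logger whose name is built from per-instance data (as in PulsarStreamLevelConsumer further below) stays a non-static, underscore-prefixed field. A short sketch of both variants, with class names invented for illustration:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ExampleOffset {
  // Class-level logger: one shared instance, so it can be a static final constant
  private static final Logger LOGGER = LoggerFactory.getLogger(ExampleOffset.class);
}

class ExampleConsumer {
  // Instance-level logger: its name depends on constructor arguments, so it cannot be static
  private final Logger _logger;

  ExampleConsumer(String tableName, String topicName) {
    _logger = LoggerFactory.getLogger(ExampleConsumer.class.getName() + "_" + tableName + "_" + topicName);
  }
}
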
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarMessageBatch.java b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarMessageBatch.java
index 8b69dd6..aba8ea7 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarMessageBatch.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarMessageBatch.java
@@ -34,30 +34,30 @@ import org.apache.pulsar.client.internal.DefaultImplementation;
  */
 public class PulsarMessageBatch implements MessageBatch<byte[]> {
 
-  private List<Message<byte[]>> messageList = new ArrayList<>();
+  private List<Message<byte[]>> _messageList = new ArrayList<>();
 
   public PulsarMessageBatch(Iterable<Message<byte[]>> iterable) {
-    iterable.forEach(messageList::add);
+    iterable.forEach(_messageList::add);
   }
 
   @Override
   public int getMessageCount() {
-    return messageList.size();
+    return _messageList.size();
   }
 
   @Override
   public byte[] getMessageAtIndex(int index) {
-    return messageList.get(index).getData();
+    return _messageList.get(index).getData();
   }
 
   @Override
   public int getMessageOffsetAtIndex(int index) {
-    return ByteBuffer.wrap(messageList.get(index).getData()).arrayOffset();
+    return ByteBuffer.wrap(_messageList.get(index).getData()).arrayOffset();
   }
 
   @Override
   public int getMessageLengthAtIndex(int index) {
-    return messageList.get(index).getData().length;
+    return _messageList.get(index).getData().length;
   }
 
   /**
@@ -72,7 +72,7 @@ public class PulsarMessageBatch implements MessageBatch<byte[]> {
    */
   @Override
   public StreamPartitionMsgOffset getNextStreamParitionMsgOffsetAtIndex(int index) {
-    MessageIdImpl currentMessageId = MessageIdImpl.convertToMessageIdImpl(messageList.get(index).getMessageId());
+    MessageIdImpl currentMessageId = MessageIdImpl.convertToMessageIdImpl(_messageList.get(index).getMessageId());
     MessageId nextMessageId = DefaultImplementation
         .newMessageId(currentMessageId.getLedgerId(), currentMessageId.getEntryId() + 1,
             currentMessageId.getPartitionIndex());
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumer.java b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumer.java
index 0a0e45d..fc03b9c 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumer.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumer.java
@@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory;
  * A {@link StreamLevelConsumer} implementation for the Pulsar stream
  */
 public class PulsarStreamLevelConsumer implements StreamLevelConsumer {
-  private Logger LOGGER;
+  private Logger _logger;
 
   private StreamMessageDecoder _messageDecoder;
 
@@ -43,9 +43,9 @@ public class PulsarStreamLevelConsumer implements StreamLevelConsumer {
 
   private Reader<byte[]> _reader;
 
-  private long lastLogTime = 0;
-  private long lastCount = 0;
-  private long currentCount = 0L;
+  private long _lastLogTime = 0;
+  private long _lastCount = 0;
+  private long _currentCount = 0L;
 
   public PulsarStreamLevelConsumer(String clientId, String tableName, StreamConfig streamConfig,
       Set<String> sourceFields, String subscriberId) {
@@ -54,9 +54,9 @@ public class PulsarStreamLevelConsumer implements StreamLevelConsumer {
 
     _messageDecoder = StreamDecoderProvider.create(streamConfig, sourceFields);
 
-    LOGGER =
+    _logger =
         LoggerFactory.getLogger(PulsarConfig.class.getName() + "_" + tableName + "_" + streamConfig.getTopicName());
-    LOGGER.info("PulsarStreamLevelConsumer: streamConfig : {}", _streamConfig);
+    _logger.info("PulsarStreamLevelConsumer: streamConfig : {}", _streamConfig);
   }
 
   @Override
@@ -75,24 +75,24 @@ public class PulsarStreamLevelConsumer implements StreamLevelConsumer {
         final Message<byte[]> record = _reader.readNext();
         destination = _messageDecoder.decode(record.getData(), destination);
 
-        ++currentCount;
+        ++_currentCount;
 
         final long now = System.currentTimeMillis();
         // Log every minute or 100k events
-        if (now - lastLogTime > 60000 || currentCount - lastCount >= 100000) {
-          if (lastCount == 0) {
-            LOGGER.info("Consumed {} events from kafka stream {}", currentCount, _streamConfig.getTopicName());
+        if (now - _lastLogTime > 60000 || _currentCount - _lastCount >= 100000) {
+          if (_lastCount == 0) {
+            _logger.info("Consumed {} events from kafka stream {}", _currentCount, _streamConfig.getTopicName());
           } else {
-            LOGGER.info("Consumed {} events from kafka stream {} (rate:{}/s)", currentCount - lastCount,
-                _streamConfig.getTopicName(), (float) (currentCount - lastCount) * 1000 / (now - lastLogTime));
+            _logger.info("Consumed {} events from kafka stream {} (rate:{}/s)", _currentCount - _lastCount,
+                _streamConfig.getTopicName(), (float) (_currentCount - _lastCount) * 1000 / (now - _lastLogTime));
           }
-          lastCount = currentCount;
-          lastLogTime = now;
+          _lastCount = _currentCount;
+          _lastLogTime = now;
         }
         return destination;
       }
     } catch (Exception e) {
-      LOGGER.warn("Caught exception while consuming events", e);
+      _logger.warn("Caught exception while consuming events", e);
     }
     return null;
   }
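
The consumer above throttles its progress logging to roughly once per minute or once per 100k events. A standalone sketch of that time-or-count throttle follows; the helper class name is hypothetical and the thresholds are taken from the code above.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch only: logs progress at most once per minute or once per 100k recorded events
public class ThrottledProgressLogger {
  private static final Logger LOGGER = LoggerFactory.getLogger(ThrottledProgressLogger.class);

  private long _lastLogTime = 0;
  private long _lastCount = 0;
  private long _currentCount = 0;

  public void recordEvent(String topic) {
    ++_currentCount;
    long now = System.currentTimeMillis();
    if (now - _lastLogTime > 60_000L || _currentCount - _lastCount >= 100_000L) {
      if (_lastCount == 0) {
        LOGGER.info("Consumed {} events from stream {}", _currentCount, topic);
      } else {
        LOGGER.info("Consumed {} events from stream {} (rate: {}/s)", _currentCount - _lastCount, topic,
            (float) (_currentCount - _lastCount) * 1000 / (now - _lastLogTime));
      }
      _lastCount = _currentCount;
      _lastLogTime = now;
    }
  }
}
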
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumerManager.java b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumerManager.java
index 6788f07..325ebcc 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumerManager.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamLevelConsumerManager.java
@@ -36,6 +36,9 @@ import org.slf4j.LoggerFactory;
  * Implements pulsar high level connection manager.
  */
 public class PulsarStreamLevelConsumerManager {
+  private PulsarStreamLevelConsumerManager() {
+  }
+
   private static final Logger LOGGER = LoggerFactory.getLogger(PulsarStreamLevelConsumerManager.class);
   private static final Long IN_USE = -1L;
   private static final long CONSUMER_SHUTDOWN_DELAY_MILLIS = TimeUnit.SECONDS.toMillis(60); // One minute
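
The private constructor added above is the standard way to satisfy the checkstyle rule against instantiable utility classes (classes with only static members). A minimal sketch with an invented class name:

// Utility class sketch: only static members, instantiation blocked by a private constructor
public final class ExampleUtils {
  private ExampleUtils() {
    // prevent instantiation
  }

  public static String trimToEmpty(String value) {
    return value == null ? "" : value.trim();
  }
}
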
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamMetadataProvider.java b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamMetadataProvider.java
index 4072fd0..17145cb 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamMetadataProvider.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/main/java/org/apache/pinot/plugin/stream/pulsar/PulsarStreamMetadataProvider.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
  * A {@link StreamMetadataProvider} implementation for the Pulsar stream
  */
 public class PulsarStreamMetadataProvider extends PulsarPartitionLevelConnectionHandler implements StreamMetadataProvider {
-  private Logger LOGGER = LoggerFactory.getLogger(PulsarStreamMetadataProvider.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(PulsarStreamMetadataProvider.class);
 
   private StreamConfig _streamConfig;
   private int _partition;
diff --git a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/test/java/org/apache/pinot/plugin/stream/pulsar/PulsarStandaloneCluster.java b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/test/java/org/apache/pinot/plugin/stream/pulsar/PulsarStandaloneCluster.java
index e5e9cfd..d933f7f 100644
--- a/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/test/java/org/apache/pinot/plugin/stream/pulsar/PulsarStandaloneCluster.java
+++ b/pinot-plugins/pinot-stream-ingestion/pinot-pulsar/src/test/java/org/apache/pinot/plugin/stream/pulsar/PulsarStandaloneCluster.java
@@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory;
 
 
 public class PulsarStandaloneCluster {
-  private static Logger LOGGER = LoggerFactory.getLogger(PulsarStandaloneCluster.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(PulsarStandaloneCluster.class);
 
   public static final String DEFAULT_STANDALONE_CONF = "standalone.properties";
   public static final String DEFAULT_ZK_DIR = "pulsar-zk";

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pinot.apache.org
For additional commands, e-mail: commits-help@pinot.apache.org

