hadoop-common-commits mailing list archives

From: szets...@apache.org
Subject: [1/2] hadoop git commit: HDFS-8100. Refactor DFSClient.Conf to a standalone class and separate short-circuit related conf into ShortCircuitConf.
Date: Fri, 10 Apr 2015 21:51:00 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3d17c5017 -> 1113aca7f
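
In short: this patch promotes the old DFSClient.Conf inner class to a
standalone org.apache.hadoop.hdfs.client.impl.DfsClientConf and splits the
short-circuit read settings into a nested ShortCircuitConf, so components that
only need short-circuit settings can depend on just that slice. A minimal
call-site migration sketch, using only the constructor and getters visible in
the diff below (the sketch class and its variable names are illustrative, not
part of the commit):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
    import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;

    public class DfsClientConfMigrationSketch {
      public static void main(String[] args) {
        // Before HDFS-8100: new DFSClient.Conf(conf), with direct field access
        // such as dfsClient.getConf().ioBufferSize (see the test diffs below).
        DfsClientConf clientConf = new DfsClientConf(new HdfsConfiguration());
        int ioBufferSize = clientConf.getIoBufferSize(); // getter replaces field

        // Short-circuit settings now live behind getShortCircuitConf().
        ShortCircuitConf scConf = clientConf.getShortCircuitConf();
        System.out.println(scConf.confAsString() + ", ioBufferSize = " + ioBufferSize);
      }
    }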


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
new file mode 100644
index 0000000..e781b16
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -0,0 +1,738 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client.impl;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.BlockReaderFactory;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.util.ByteArrayManager;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.util.DataChecksum;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * DFSClient configuration.
+ */
+public class DfsClientConf {
+
+  private final int hdfsTimeout;    // timeout value for a DFS operation.
+
+  private final int maxFailoverAttempts;
+  private final int maxRetryAttempts;
+  private final int failoverSleepBaseMillis;
+  private final int failoverSleepMaxMillis;
+  private final int maxBlockAcquireFailures;
+  private final int datanodeSocketWriteTimeout;
+  private final int ioBufferSize;
+  private final ChecksumOpt defaultChecksumOpt;
+  private final int writePacketSize;
+  private final int writeMaxPackets;
+  private final ByteArrayManager.Conf writeByteArrayManagerConf;
+  private final int socketTimeout;
+  private final long excludedNodesCacheExpiry;
+  /** Wait time window (in msec) if BlockMissingException is caught */
+  private final int timeWindow;
+  private final int numCachedConnRetry;
+  private final int numBlockWriteRetry;
+  private final int numBlockWriteLocateFollowingRetry;
+  private final int blockWriteLocateFollowingInitialDelayMs;
+  private final long defaultBlockSize;
+  private final long prefetchSize;
+  private final short defaultReplication;
+  private final String taskId;
+  private final FsPermission uMask;
+  private final boolean connectToDnViaHostname;
+  private final boolean hdfsBlocksMetadataEnabled;
+  private final int fileBlockStorageLocationsNumThreads;
+  private final int fileBlockStorageLocationsTimeoutMs;
+  private final int retryTimesForGetLastBlockLength;
+  private final int retryIntervalForGetLastBlockLength;
+  private final long datanodeRestartTimeout;
+  private final long slowIoWarningThresholdMs;
+
+  private final ShortCircuitConf shortCircuitConf;
+
+  public DfsClientConf(Configuration conf) {
+    // The hdfsTimeout is currently the same as the ipc timeout.
+    hdfsTimeout = Client.getTimeout(conf);
+
+    maxFailoverAttempts = conf.getInt(
+        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
+        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+    maxRetryAttempts = conf.getInt(
+        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
+        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
+    failoverSleepBaseMillis = conf.getInt(
+        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
+        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
+    failoverSleepMaxMillis = conf.getInt(
+        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
+        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
+
+    maxBlockAcquireFailures = conf.getInt(
+        DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+        DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
+    datanodeSocketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+        HdfsServerConstants.WRITE_TIMEOUT);
+    ioBufferSize = conf.getInt(
+        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
+        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
+    defaultChecksumOpt = getChecksumOptFromConf(conf);
+    socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+        HdfsServerConstants.READ_TIMEOUT);
+    // dfs.write.packet.size is an internal config variable
+    writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+        DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
+    writeMaxPackets = conf.getInt(
+        DFSConfigKeys.DFS_CLIENT_WRITE_MAX_PACKETS_IN_FLIGHT_KEY,
+        DFSConfigKeys.DFS_CLIENT_WRITE_MAX_PACKETS_IN_FLIGHT_DEFAULT);
+    
+    final boolean byteArrayManagerEnabled = conf.getBoolean(
+        DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_ENABLED_KEY,
+        DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_ENABLED_DEFAULT);
+    if (!byteArrayManagerEnabled) {
+      writeByteArrayManagerConf = null;
+    } else {
+      final int countThreshold = conf.getInt(
+          DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_THRESHOLD_KEY,
+          DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_THRESHOLD_DEFAULT);
+      final int countLimit = conf.getInt(
+          DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_LIMIT_KEY,
+          DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_LIMIT_DEFAULT);
+      final long countResetTimePeriodMs = conf.getLong(
+          DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_RESET_TIME_PERIOD_MS_KEY,
+          DFSConfigKeys.DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
+      writeByteArrayManagerConf = new ByteArrayManager.Conf(
+          countThreshold, countLimit, countResetTimePeriodMs); 
+    }
+    
+    defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
+        DFS_BLOCK_SIZE_DEFAULT);
+    defaultReplication = (short) conf.getInt(
+        DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
+    taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
+    excludedNodesCacheExpiry = conf.getLong(
+        DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
+        DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
+    prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
+        10 * defaultBlockSize);
+    timeWindow = conf.getInt(
+        HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
+        HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
+    numCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
+        DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
+    numBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
+        DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
+    numBlockWriteLocateFollowingRetry = conf.getInt(
+        DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
+        DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+    blockWriteLocateFollowingInitialDelayMs = conf.getInt(
+        DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+        DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT);
+    uMask = FsPermission.getUMask(conf);
+    connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
+        DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
+    hdfsBlocksMetadataEnabled = conf.getBoolean(
+        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, 
+        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
+    fileBlockStorageLocationsNumThreads = conf.getInt(
+        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
+        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
+    fileBlockStorageLocationsTimeoutMs = conf.getInt(
+        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
+        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
+    retryTimesForGetLastBlockLength = conf.getInt(
+        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
+        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
+    retryIntervalForGetLastBlockLength = conf.getInt(
+        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
+        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
+
+    datanodeRestartTimeout = conf.getLong(
+        DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,
+        DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT) * 1000;
+    slowIoWarningThresholdMs = conf.getLong(
+        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
+        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
+    
+    shortCircuitConf = new ShortCircuitConf(conf);
+  }
+
+  private DataChecksum.Type getChecksumType(Configuration conf) {
+    final String checksum = conf.get(
+        DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
+        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+    try {
+      return DataChecksum.Type.valueOf(checksum);
+    } catch(IllegalArgumentException iae) {
+      DFSClient.LOG.warn("Bad checksum type: " + checksum + ". Using default "
+          + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+      return DataChecksum.Type.valueOf(
+          DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT); 
+    }
+  }
+
+  // Construct a checksum option from conf
+  private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+    DataChecksum.Type type = getChecksumType(conf);
+    int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
+        DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    return new ChecksumOpt(type, bytesPerChecksum);
+  }
+
+  /** Create a DataChecksum with the given option. */
+  public DataChecksum createChecksum(ChecksumOpt userOpt) {
+    // Fill in any missing field with the default.
+    ChecksumOpt opt = ChecksumOpt.processChecksumOpt(
+        defaultChecksumOpt, userOpt);
+    DataChecksum dataChecksum = DataChecksum.newDataChecksum(
+        opt.getChecksumType(),
+        opt.getBytesPerChecksum());
+    if (dataChecksum == null) {
+      throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
+          + userOpt + ", default=" + defaultChecksumOpt
+          + ", effective=null");
+    }
+    return dataChecksum;
+  }
+
+  @VisibleForTesting
+  public int getBlockWriteLocateFollowingInitialDelayMs() {
+    return blockWriteLocateFollowingInitialDelayMs;
+  }
+
+  /**
+   * @return the hdfsTimeout
+   */
+  public int getHdfsTimeout() {
+    return hdfsTimeout;
+  }
+
+  /**
+   * @return the maxFailoverAttempts
+   */
+  public int getMaxFailoverAttempts() {
+    return maxFailoverAttempts;
+  }
+
+  /**
+   * @return the maxRetryAttempts
+   */
+  public int getMaxRetryAttempts() {
+    return maxRetryAttempts;
+  }
+
+  /**
+   * @return the failoverSleepBaseMillis
+   */
+  public int getFailoverSleepBaseMillis() {
+    return failoverSleepBaseMillis;
+  }
+
+  /**
+   * @return the failoverSleepMaxMillis
+   */
+  public int getFailoverSleepMaxMillis() {
+    return failoverSleepMaxMillis;
+  }
+
+  /**
+   * @return the maxBlockAcquireFailures
+   */
+  public int getMaxBlockAcquireFailures() {
+    return maxBlockAcquireFailures;
+  }
+
+  /**
+   * @return the datanodeSocketWriteTimeout
+   */
+  public int getDatanodeSocketWriteTimeout() {
+    return datanodeSocketWriteTimeout;
+  }
+
+  /**
+   * @return the ioBufferSize
+   */
+  public int getIoBufferSize() {
+    return ioBufferSize;
+  }
+
+  /**
+   * @return the defaultChecksumOpt
+   */
+  public ChecksumOpt getDefaultChecksumOpt() {
+    return defaultChecksumOpt;
+  }
+
+  /**
+   * @return the writePacketSize
+   */
+  public int getWritePacketSize() {
+    return writePacketSize;
+  }
+
+  /**
+   * @return the writeMaxPackets
+   */
+  public int getWriteMaxPackets() {
+    return writeMaxPackets;
+  }
+
+  /**
+   * @return the writeByteArrayManagerConf
+   */
+  public ByteArrayManager.Conf getWriteByteArrayManagerConf() {
+    return writeByteArrayManagerConf;
+  }
+
+  /**
+   * @return the socketTimeout
+   */
+  public int getSocketTimeout() {
+    return socketTimeout;
+  }
+
+  /**
+   * @return the excludedNodesCacheExpiry
+   */
+  public long getExcludedNodesCacheExpiry() {
+    return excludedNodesCacheExpiry;
+  }
+
+  /**
+   * @return the timeWindow
+   */
+  public int getTimeWindow() {
+    return timeWindow;
+  }
+
+  /**
+   * @return the numCachedConnRetry
+   */
+  public int getNumCachedConnRetry() {
+    return numCachedConnRetry;
+  }
+
+  /**
+   * @return the numBlockWriteRetry
+   */
+  public int getNumBlockWriteRetry() {
+    return numBlockWriteRetry;
+  }
+
+  /**
+   * @return the numBlockWriteLocateFollowingRetry
+   */
+  public int getNumBlockWriteLocateFollowingRetry() {
+    return numBlockWriteLocateFollowingRetry;
+  }
+
+  /**
+   * @return the defaultBlockSize
+   */
+  public long getDefaultBlockSize() {
+    return defaultBlockSize;
+  }
+
+  /**
+   * @return the prefetchSize
+   */
+  public long getPrefetchSize() {
+    return prefetchSize;
+  }
+
+  /**
+   * @return the defaultReplication
+   */
+  public short getDefaultReplication() {
+    return defaultReplication;
+  }
+
+  /**
+   * @return the taskId
+   */
+  public String getTaskId() {
+    return taskId;
+  }
+
+  /**
+   * @return the uMask
+   */
+  public FsPermission getUMask() {
+    return uMask;
+  }
+
+  /**
+   * @return the connectToDnViaHostname
+   */
+  public boolean isConnectToDnViaHostname() {
+    return connectToDnViaHostname;
+  }
+
+  /**
+   * @return the hdfsBlocksMetadataEnabled
+   */
+  public boolean isHdfsBlocksMetadataEnabled() {
+    return hdfsBlocksMetadataEnabled;
+  }
+
+  /**
+   * @return the fileBlockStorageLocationsNumThreads
+   */
+  public int getFileBlockStorageLocationsNumThreads() {
+    return fileBlockStorageLocationsNumThreads;
+  }
+
+  /**
+   * @return the fileBlockStorageLocationsTimeoutMs
+   */
+  public int getFileBlockStorageLocationsTimeoutMs() {
+    return fileBlockStorageLocationsTimeoutMs;
+  }
+
+  /**
+   * @return the retryTimesForGetLastBlockLength
+   */
+  public int getRetryTimesForGetLastBlockLength() {
+    return retryTimesForGetLastBlockLength;
+  }
+
+  /**
+   * @return the retryIntervalForGetLastBlockLength
+   */
+  public int getRetryIntervalForGetLastBlockLength() {
+    return retryIntervalForGetLastBlockLength;
+  }
+
+  /**
+   * @return the datanodeRestartTimeout
+   */
+  public long getDatanodeRestartTimeout() {
+    return datanodeRestartTimeout;
+  }
+
+  /**
+   * @return the slowIoWarningThresholdMs
+   */
+  public long getSlowIoWarningThresholdMs() {
+    return slowIoWarningThresholdMs;
+  }
+
+  /**
+   * @return the shortCircuitConf
+   */
+  public ShortCircuitConf getShortCircuitConf() {
+    return shortCircuitConf;
+  }
+
+  public static class ShortCircuitConf {
+    private static final Log LOG = LogFactory.getLog(ShortCircuitConf.class);
+
+    private final int socketCacheCapacity;
+    private final long socketCacheExpiry;
+
+    private final boolean useLegacyBlockReader;
+    private final boolean useLegacyBlockReaderLocal;
+    private final String domainSocketPath;
+    private final boolean skipShortCircuitChecksums;
+
+    private final int shortCircuitBufferSize;
+    private final boolean shortCircuitLocalReads;
+    private final boolean domainSocketDataTraffic;
+    private final int shortCircuitStreamsCacheSize;
+    private final long shortCircuitStreamsCacheExpiryMs; 
+    private final int shortCircuitSharedMemoryWatcherInterruptCheckMs;
+    
+    private final boolean shortCircuitMmapEnabled;
+    private final int shortCircuitMmapCacheSize;
+    private final long shortCircuitMmapCacheExpiryMs;
+    private final long shortCircuitMmapCacheRetryTimeout;
+    private final long shortCircuitCacheStaleThresholdMs;
+
+    private final long keyProviderCacheExpiryMs;
+
+    @VisibleForTesting
+    public BlockReaderFactory.FailureInjector brfFailureInjector =
+        new BlockReaderFactory.FailureInjector();
+
+    public ShortCircuitConf(Configuration conf) {
+      socketCacheCapacity = conf.getInt(
+          DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
+          DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
+      socketCacheExpiry = conf.getLong(
+          DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
+          DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
+
+      useLegacyBlockReader = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
+          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
+      useLegacyBlockReaderLocal = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
+          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
+      shortCircuitLocalReads = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
+      domainSocketDataTraffic = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
+          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
+      domainSocketPath = conf.getTrimmed(
+          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
+          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
+            + " = " + useLegacyBlockReaderLocal);
+        LOG.debug(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY
+            + " = " + shortCircuitLocalReads);
+        LOG.debug(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
+            + " = " + domainSocketDataTraffic);
+        LOG.debug(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
+            + " = " + domainSocketPath);
+      }
+
+      skipShortCircuitChecksums = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT);
+      shortCircuitBufferSize = conf.getInt(
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY,
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT);
+      shortCircuitStreamsCacheSize = conf.getInt(
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY,
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_DEFAULT);
+      shortCircuitStreamsCacheExpiryMs = conf.getLong(
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
+          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_DEFAULT);
+      shortCircuitMmapEnabled = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED,
+          DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED_DEFAULT);
+      shortCircuitMmapCacheSize = conf.getInt(
+          DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE,
+          DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT);
+      shortCircuitMmapCacheExpiryMs = conf.getLong(
+          DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS,
+          DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT);
+      shortCircuitMmapCacheRetryTimeout = conf.getLong(
+          DFSConfigKeys.DFS_CLIENT_MMAP_RETRY_TIMEOUT_MS,
+          DFSConfigKeys.DFS_CLIENT_MMAP_RETRY_TIMEOUT_MS_DEFAULT);
+      shortCircuitCacheStaleThresholdMs = conf.getLong(
+          DFSConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_REPLICA_STALE_THRESHOLD_MS,
+          DFSConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_REPLICA_STALE_THRESHOLD_MS_DEFAULT);
+      shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
+          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
+          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
+
+      keyProviderCacheExpiryMs = conf.getLong(
+          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
+          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
+    }
+
+    /**
+     * @return the socketCacheCapacity
+     */
+    public int getSocketCacheCapacity() {
+      return socketCacheCapacity;
+    }
+
+    /**
+     * @return the socketCacheExpiry
+     */
+    public long getSocketCacheExpiry() {
+      return socketCacheExpiry;
+    }
+
+    public boolean isUseLegacyBlockReaderLocal() {
+      return useLegacyBlockReaderLocal;
+    }
+
+    public String getDomainSocketPath() {
+      return domainSocketPath;
+    }
+
+    public boolean isShortCircuitLocalReads() {
+      return shortCircuitLocalReads;
+    }
+
+    public boolean isDomainSocketDataTraffic() {
+      return domainSocketDataTraffic;
+    }
+
+    /**
+     * @return the useLegacyBlockReader
+     */
+    public boolean isUseLegacyBlockReader() {
+      return useLegacyBlockReader;
+    }
+
+    /**
+     * @return the skipShortCircuitChecksums
+     */
+    public boolean isSkipShortCircuitChecksums() {
+      return skipShortCircuitChecksums;
+    }
+
+    /**
+     * @return the shortCircuitBufferSize
+     */
+    public int getShortCircuitBufferSize() {
+      return shortCircuitBufferSize;
+    }
+
+    /**
+     * @return the shortCircuitStreamsCacheSize
+     */
+    public int getShortCircuitStreamsCacheSize() {
+      return shortCircuitStreamsCacheSize;
+    }
+
+    /**
+     * @return the shortCircuitStreamsCacheExpiryMs
+     */
+    public long getShortCircuitStreamsCacheExpiryMs() {
+      return shortCircuitStreamsCacheExpiryMs;
+    }
+
+    /**
+     * @return the shortCircuitSharedMemoryWatcherInterruptCheckMs
+     */
+    public int getShortCircuitSharedMemoryWatcherInterruptCheckMs() {
+      return shortCircuitSharedMemoryWatcherInterruptCheckMs;
+    }
+
+    /**
+     * @return the shortCircuitMmapEnabled
+     */
+    public boolean isShortCircuitMmapEnabled() {
+      return shortCircuitMmapEnabled;
+    }
+
+    /**
+     * @return the shortCircuitMmapCacheSize
+     */
+    public int getShortCircuitMmapCacheSize() {
+      return shortCircuitMmapCacheSize;
+    }
+
+    /**
+     * @return the shortCircuitMmapCacheExpiryMs
+     */
+    public long getShortCircuitMmapCacheExpiryMs() {
+      return shortCircuitMmapCacheExpiryMs;
+    }
+
+    /**
+     * @return the shortCircuitMmapCacheRetryTimeout
+     */
+    public long getShortCircuitMmapCacheRetryTimeout() {
+      return shortCircuitMmapCacheRetryTimeout;
+    }
+
+    /**
+     * @return the shortCircuitCacheStaleThresholdMs
+     */
+    public long getShortCircuitCacheStaleThresholdMs() {
+      return shortCircuitCacheStaleThresholdMs;
+    }
+
+    /**
+     * @return the keyProviderCacheExpiryMs
+     */
+    public long getKeyProviderCacheExpiryMs() {
+      return keyProviderCacheExpiryMs;
+    }
+
+    public String confAsString() {
+      StringBuilder builder = new StringBuilder();
+      builder.append("shortCircuitStreamsCacheSize = ").
+        append(shortCircuitStreamsCacheSize).
+        append(", shortCircuitStreamsCacheExpiryMs = ").
+        append(shortCircuitStreamsCacheExpiryMs).
+        append(", shortCircuitMmapCacheSize = ").
+        append(shortCircuitMmapCacheSize).
+        append(", shortCircuitMmapCacheExpiryMs = ").
+        append(shortCircuitMmapCacheExpiryMs).
+        append(", shortCircuitMmapCacheRetryTimeout = ").
+        append(shortCircuitMmapCacheRetryTimeout).
+        append(", shortCircuitCacheStaleThresholdMs = ").
+        append(shortCircuitCacheStaleThresholdMs).
+        append(", socketCacheCapacity = ").
+        append(socketCacheCapacity).
+        append(", socketCacheExpiry = ").
+        append(socketCacheExpiry).
+        append(", shortCircuitLocalReads = ").
+        append(shortCircuitLocalReads).
+        append(", useLegacyBlockReaderLocal = ").
+        append(useLegacyBlockReaderLocal).
+        append(", domainSocketDataTraffic = ").
+        append(domainSocketDataTraffic).
+        append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ").
+        append(shortCircuitSharedMemoryWatcherInterruptCheckMs).
+        append(", keyProviderCacheExpiryMs = ").
+        append(keyProviderCacheExpiryMs);
+
+      return builder.toString();
+    }
+  }
+}
\ No newline at end of file
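
As a quick sanity check of the new class above, a hedged usage sketch built
only from the API it defines — DfsClientConf(Configuration),
getDefaultChecksumOpt(), and createChecksum(ChecksumOpt) — could look like the
following; passing null should let processChecksumOpt fill every field from
the conf-derived default (the sketch class name is illustrative):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
    import org.apache.hadoop.util.DataChecksum;

    public class ChecksumSketch {
      public static void main(String[] args) {
        DfsClientConf clientConf = new DfsClientConf(new HdfsConfiguration());
        // A null userOpt falls back to the conf-derived default checksum; an
        // invalid configured type would already have been caught in
        // getChecksumType and replaced by DFS_CHECKSUM_TYPE_DEFAULT.
        DataChecksum checksum = clientConf.createChecksum(null);
        System.out.println(checksum + " / " + clientConf.getDefaultChecksumOpt());
      }
    }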

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
index 5fd31a9..fadb2f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
@@ -26,14 +26,14 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSClient.Conf;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
-import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class DomainSocketFactory {
   private static final Log LOG = LogFactory.getLog(DomainSocketFactory.class);
@@ -95,7 +95,7 @@ public class DomainSocketFactory {
       .expireAfterWrite(10, TimeUnit.MINUTES)
       .build();
 
-  public DomainSocketFactory(Conf conf) {
+  public DomainSocketFactory(ShortCircuitConf conf) {
     final String feature;
    if (conf.isShortCircuitLocalReads() && (!conf.isUseLegacyBlockReaderLocal())) {
       feature = "The short-circuit local reads feature";
@@ -129,7 +129,7 @@ public class DomainSocketFactory {
    *
    * @return             Information about the socket path.
    */
-  public PathInfo getPathInfo(InetSocketAddress addr, DFSClient.Conf conf) {
+  public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf) {
     // If there is no domain socket path configured, we can't use domain
     // sockets.
     if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
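
The DomainSocketFactory change above is a pure signature narrowing: the
factory needs only the short-circuit settings, so it now takes ShortCircuitConf
instead of the whole client conf. A hedged construction sketch against the new
signature (the wrapper class is illustrative):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
    import org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory;

    public class DomainSocketFactorySketch {
      public static void main(String[] args) {
        DfsClientConf clientConf = new DfsClientConf(new HdfsConfiguration());
        // The factory now consumes only the short-circuit slice of the conf.
        DomainSocketFactory factory =
            new DomainSocketFactory(clientConf.getShortCircuitConf());
        System.out.println(factory);
      }
    }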

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index d1ec3b8..27a9ef2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.hdfs.net.DomainPeer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -359,6 +360,17 @@ public class ShortCircuitCache implements Closeable {
             DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT));
   }
 
+  public static ShortCircuitCache fromConf(ShortCircuitConf conf) {
+    return new ShortCircuitCache(
+        conf.getShortCircuitStreamsCacheSize(),
+        conf.getShortCircuitStreamsCacheExpiryMs(),
+        conf.getShortCircuitMmapCacheSize(),
+        conf.getShortCircuitMmapCacheExpiryMs(),
+        conf.getShortCircuitMmapCacheRetryTimeout(),
+        conf.getShortCircuitCacheStaleThresholdMs(),
+        conf.getShortCircuitSharedMemoryWatcherInterruptCheckMs());
+  }
+
   public ShortCircuitCache(int maxTotalSize, long maxNonMmappedEvictableLifespanMs,
       int maxEvictableMmapedSize, long maxEvictableMmapedLifespanMs,
       long mmapRetryTimeoutMs, long staleThresholdMs, int shmInterruptCheckMs) {
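
The new fromConf factory above packages the seven constructor arguments into
one call. A hedged sketch of the intended call pattern, using only members
visible in this patch (the sketch class name is illustrative):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
    import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;

    public class ShortCircuitCacheSketch {
      public static void main(String[] args) throws Exception {
        DfsClientConf clientConf = new DfsClientConf(new HdfsConfiguration());
        // One call instead of threading seven scalars to the constructor.
        ShortCircuitCache cache =
            ShortCircuitCache.fromConf(clientConf.getShortCircuitConf());
        cache.close(); // ShortCircuitCache implements Closeable (see above)
      }
    }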

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
index 296c8d2..6d644ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
@@ -42,7 +42,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.ClientContext;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -359,7 +359,7 @@ public class TestEnhancedByteBufferAccess {
     fsIn.close();
     fsIn = fs.open(TEST_PATH);
     final ShortCircuitCache cache = ClientContext.get(
-        CONTEXT, new DFSClient.Conf(conf)). getShortCircuitCache();
+        CONTEXT, new DfsClientConf(conf)). getShortCircuitCache();
     cache.accept(new CountingVisitor(0, 5, 5, 0));
     results[0] = fsIn.read(null, BLOCK_SIZE,
         EnumSet.of(ReadOption.SKIP_CHECKSUMS));
@@ -662,7 +662,7 @@ public class TestEnhancedByteBufferAccess {
     final ExtendedBlock firstBlock =
         DFSTestUtil.getFirstBlock(fs, TEST_PATH);
     final ShortCircuitCache cache = ClientContext.get(
-        CONTEXT, new DFSClient.Conf(conf)). getShortCircuitCache();
+        CONTEXT, new DfsClientConf(conf)). getShortCircuitCache();
     waitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
     // Uncache the replica
     fs.removeCacheDirective(directiveId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
index 29c32f5..ab3515e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
@@ -187,7 +188,7 @@ public class TestBlockReaderLocal {
               Time.now(), shm.allocAndRegisterSlot(
                   ExtendedBlockId.fromExtendedBlock(block)));
       blockReaderLocal = new BlockReaderLocal.Builder(
-              new DFSClient.Conf(conf)).
+              new DfsClientConf.ShortCircuitConf(conf)).
           setFilename(TEST_PATH.getName()).
           setBlock(block).
           setShortCircuitReplica(replica).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 5c0208b..94b2411 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -300,7 +300,7 @@ public class TestDFSClientRetries {
       NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
       NamenodeProtocols spyNN = spy(preSpyNN);
       DFSClient client = new DFSClient(null, spyNN, conf, null);
-      int maxBlockAcquires = client.getMaxBlockAcquireFailures();
+      int maxBlockAcquires = client.getConf().getMaxBlockAcquireFailures();
       assertTrue(maxBlockAcquires > 0);
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index a410e74..478f7e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -113,7 +114,7 @@ public class TestDFSOutputStream {
 
   @Test
   public void testCongestionBackoff() throws IOException {
-    DFSClient.Conf dfsClientConf = mock(DFSClient.Conf.class);
+    DfsClientConf dfsClientConf = mock(DfsClientConf.class);
     DFSClient client = mock(DFSClient.class);
     when(client.getConf()).thenReturn(dfsClientConf);
     client.clientRunning = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
index 11cbcad..f091db7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertSame;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -59,13 +60,13 @@ public class TestLeaseRenewer {
 }
  
   private DFSClient createMockClient() {
+    final DfsClientConf mockConf = Mockito.mock(DfsClientConf.class);
+    Mockito.doReturn((int)FAST_GRACE_PERIOD).when(mockConf).getHdfsTimeout();
+
     DFSClient mock = Mockito.mock(DFSClient.class);
-    Mockito.doReturn(true)
-      .when(mock).isClientRunning();
-    Mockito.doReturn((int)FAST_GRACE_PERIOD)
-      .when(mock).getHdfsTimeout();
-    Mockito.doReturn("myclient")
-      .when(mock).getClientName();
+    Mockito.doReturn(true).when(mock).isClientRunning();
+    Mockito.doReturn(mockConf).when(mock).getConf();
+    Mockito.doReturn("myclient").when(mock).getClientName();
     return mock;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
index 23e2a7a..05698ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
-import org.junit.Assume;
 import org.junit.Ignore;
 import org.junit.Test;
 
@@ -325,7 +324,7 @@ public class TestParallelReadUtil {
       testInfo.filepath = new Path("/TestParallelRead.dat." + i);
       testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K);
       testInfo.dis = dfsClient.open(testInfo.filepath.toString(),
-          dfsClient.getConf().ioBufferSize, verifyChecksums);
+          dfsClient.getConf().getIoBufferSize(), verifyChecksums);
 
       for (int j = 0; j < nWorkerEach; ++j) {
         workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 84e5c82..c280027 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -146,7 +147,7 @@ public class TestBlockTokenWithDFS {
       DatanodeInfo[] nodes = lblock.getLocations();
       targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
 
-      blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
+      blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
           setFileName(BlockReaderFactory.getFileName(targetAddr, 
                         "test-blockpoolid", block.getBlockId())).
           setBlock(block).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 41e8d7b..0a90947 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -40,12 +40,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
 import org.apache.hadoop.hdfs.ClientContext;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -405,7 +405,7 @@ public class TestDataNodeVolumeFailure {
    
     targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
 
-    BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
+    BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
       setInetSocketAddress(targetAddr).
       setBlock(block).
       setFileName(BlockReaderFactory.getFileName(targetAddr,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1113aca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7d26dee..e38b97b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -657,7 +657,7 @@ public class TestShortCircuitCache {
 
     // The second read should fail, and we should only have 1 segment and 1 slot
     // left.
-    fs.getClient().getConf().brfFailureInjector =
+    fs.getClient().getConf().getShortCircuitConf().brfFailureInjector =
         new TestCleanupFailureInjector();
     try {
       DFSTestUtil.readFileBuffer(fs, TEST_PATH2);

