hadoop-common-commits mailing list archives

From sun...@apache.org
Subject [44/51] [partial] hadoop git commit: HADOOP-15791. Remove Ozone related sources from the 3.2 branch. Contributed by Elek, Marton.
Date Fri, 05 Oct 2018 12:56:39 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
deleted file mode 100644
index 85cebed..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.iq80.leveldb.Options;
-import org.rocksdb.BlockBasedTableConfig;
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
-
-/**
- * Builder for metadata store.
- */
-public class MetadataStoreBuilder {
-
-  @VisibleForTesting
-  static final Logger LOG =
-      LoggerFactory.getLogger(MetadataStoreBuilder.class);
-  private File dbFile;
-  private long cacheSize;
-  private boolean createIfMissing = true;
-  private Configuration conf;
-  private String dbType;
-
-  public static MetadataStoreBuilder newBuilder() {
-    return new MetadataStoreBuilder();
-  }
-
-  public MetadataStoreBuilder setDbFile(File dbPath) {
-    this.dbFile = dbPath;
-    return this;
-  }
-
-  public MetadataStoreBuilder setCacheSize(long cache) {
-    this.cacheSize = cache;
-    return this;
-  }
-
-  public MetadataStoreBuilder setCreateIfMissing(boolean doCreate) {
-    this.createIfMissing = doCreate;
-    return this;
-  }
-
-  public MetadataStoreBuilder setConf(Configuration configuration) {
-    this.conf = configuration;
-    return this;
-  }
-
-  /**
-   * Set the container DB Type.
-   * @param type
-   * @return MetadataStoreBuilder
-   */
-  public MetadataStoreBuilder setDBType(String type) {
-    this.dbType = type;
-    return this;
-  }
-
-
-  public MetadataStore build() throws IOException {
-    if (dbFile == null) {
-      throw new IllegalArgumentException("Failed to build metadata store, "
-          + "dbFile is required but not found");
-    }
-
-    // Build db store based on configuration
-    MetadataStore store = null;
-
-    if (dbType == null) {
-      dbType = conf == null ?
-          OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT :
-          conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-              OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
-      LOG.debug("dbType is null, using dbType {} from ozone configuration",
-          dbType);
-    } else {
-      LOG.debug("Using dbType {} for metastore", dbType);
-    }
-    if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) {
-      Options options = new Options();
-      options.createIfMissing(createIfMissing);
-      if (cacheSize > 0) {
-        options.cacheSize(cacheSize);
-      }
-      store = new LevelDBStore(dbFile, options);
-    } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
-      org.rocksdb.Options opts = new org.rocksdb.Options();
-      opts.setCreateIfMissing(createIfMissing);
-
-      if (cacheSize > 0) {
-        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
-        tableConfig.setBlockCacheSize(cacheSize);
-        opts.setTableFormatConfig(tableConfig);
-      }
-
-      String rocksDbStat = conf == null ?
-          OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT :
-          conf.getTrimmed(OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-              OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-
-      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-        Statistics statistics = new Statistics();
-        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-        opts = opts.setStatistics(statistics);
-
-      }
-      store = new RocksDBStore(dbFile, opts);
-    } else {
-      throw new IllegalArgumentException("Invalid argument for "
-          + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
-          + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
-          + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
-          + ", but met " + dbType);
-    }
-    return store;
-  }
-}
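
For reference, a minimal sketch of how the removed builder was typically driven. The path, cache size and the "RocksDB" type string below are illustrative assumptions; when no type is set, the builder falls back to the OZONE_METADATA_STORE_IMPL key from the configuration.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;

public class MetadataStoreBuilderExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // dbFile is required; everything else has defaults.
    MetadataStore store = MetadataStoreBuilder.newBuilder()
        .setConf(conf)
        .setDbFile(new File("/tmp/container.db"))   // hypothetical location
        .setCacheSize(64L * 1024 * 1024)            // 64 MB block cache
        .setDBType("RocksDB")                       // or "LevelDB" (assumed values)
        .build();
    store.close();
  }
}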

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
deleted file mode 100644
index 379d9e9..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
-import org.rocksdb.DbPath;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.RocksIterator;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * RocksDB implementation of ozone metadata store.
- */
-public class RocksDBStore implements MetadataStore {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RocksDBStore.class);
-
-  private RocksDB db = null;
-  private File dbLocation;
-  private WriteOptions writeOptions;
-  private Options dbOptions;
-  private ObjectName statMBeanName;
-
-  public RocksDBStore(File dbFile, Options options)
-      throws IOException {
-    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
-    RocksDB.loadLibrary();
-    dbOptions = options;
-    dbLocation = dbFile;
-    writeOptions = new WriteOptions();
-    try {
-
-      db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
-      if (dbOptions.statistics() != null) {
-
-        Map<String, String> jmxProperties = new HashMap<String, String>();
-        jmxProperties.put("dbName", dbFile.getName());
-        statMBeanName = MBeans.register("Ozone", "RocksDbStore", jmxProperties,
-            new RocksDBStoreMBean(dbOptions.statistics()));
-        if (statMBeanName == null) {
-          LOG.warn("jmx registration failed during RocksDB init, db path :{}",
-              dbFile.getAbsolutePath());
-        }
-      }
-    } catch (RocksDBException e) {
-      throw new IOException(
-          "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e);
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("RocksDB successfully opened.");
-      LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
-      LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
-      LOG.debug("[Option] compactionPriority= {}", options.compactionStyle());
-      LOG.debug("[Option] compressionType= {}", options.compressionType());
-      LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
-      LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
-    }
-  }
-
-  public static IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
-    try {
-      db.put(writeOptions, key, value);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to put key-value to metadata store", e);
-    }
-  }
-
-  @Override
-  public boolean isEmpty() throws IOException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      it.seekToFirst();
-      return !it.isValid();
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public byte[] get(byte[] key) throws IOException {
-    try {
-      return db.get(key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to get the value for the given key", e);
-    }
-  }
-
-  @Override
-  public void delete(byte[] key) throws IOException {
-    try {
-      db.delete(key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to delete the given key", e);
-    }
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, false, filters);
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, true, filters);
-  }
-
-  private List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, boolean sequential,
-      MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    List<Map.Entry<byte[], byte[]>> result = new ArrayList<>();
-    long start = System.currentTimeMillis();
-    if (count < 0) {
-      throw new IllegalArgumentException(
-          "Invalid count given " + count + ", count must be greater than 0");
-    }
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (startKey == null) {
-        it.seekToFirst();
-      } else {
-        if(get(startKey) == null) {
-          // Key not found, return empty list
-          return result;
-        }
-        it.seek(startKey);
-      }
-      while(it.isValid() && result.size() < count) {
-        byte[] currentKey = it.key();
-        byte[] currentValue = it.value();
-
-        it.prev();
-        final byte[] prevKey = it.isValid() ? it.key() : null;
-
-        it.seek(currentKey);
-        it.next();
-        final byte[] nextKey = it.isValid() ? it.key() : null;
-
-        if (filters == null) {
-          result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
-              currentValue));
-        } else {
-          if (Arrays.asList(filters).stream()
-              .allMatch(entry -> entry.filterKey(prevKey,
-                  currentKey, nextKey))) {
-            result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
-                currentValue));
-          } else {
-            if (result.size() > 0 && sequential) {
-              // if the caller asks for a sequential range of results,
-              // and we met a dis-match, abort iteration from here.
-              // if result is empty, we continue to look for the first match.
-              break;
-            }
-          }
-        }
-      }
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-      long end = System.currentTimeMillis();
-      long timeConsumed = end - start;
-      if (LOG.isDebugEnabled()) {
-        if (filters != null) {
-          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
-            int scanned = filter.getKeysScannedNum();
-            int hinted = filter.getKeysHintedNum();
-            if (scanned > 0 || hinted > 0) {
-              LOG.debug(
-                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
-                  filter.getClass().getSimpleName(), filter.getKeysScannedNum(),
-                  filter.getKeysHintedNum());
-            }
-          }
-        }
-        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
-            + " result length is {}.", timeConsumed, result.size());
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public void writeBatch(BatchOperation operation)
-      throws IOException {
-    List<BatchOperation.SingleOperation> operations =
-        operation.getOperations();
-    if (!operations.isEmpty()) {
-      try (WriteBatch writeBatch = new WriteBatch()) {
-        for (BatchOperation.SingleOperation opt : operations) {
-          switch (opt.getOpt()) {
-          case DELETE:
-            writeBatch.delete(opt.getKey());
-            break;
-          case PUT:
-            writeBatch.put(opt.getKey(), opt.getValue());
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid operation "
-                + opt.getOpt());
-          }
-        }
-        db.write(writeOptions, writeBatch);
-      } catch (RocksDBException e) {
-        throw toIOException("Batch write operation failed", e);
-      }
-    }
-  }
-
-  @Override
-  public void compactDB() throws IOException {
-    if (db != null) {
-      try {
-        db.compactRange();
-      } catch (RocksDBException e) {
-        throw toIOException("Failed to compact db", e);
-      }
-    }
-  }
-
-  private void deleteQuietly(File fileOrDir) {
-    if (fileOrDir != null && fileOrDir.exists()) {
-      try {
-        FileUtils.forceDelete(fileOrDir);
-      } catch (IOException e) {
-        LOG.warn("Failed to delete dir {}", fileOrDir.getAbsolutePath(), e);
-      }
-    }
-  }
-
-  @Override
-  public void destroy() throws IOException {
-    // Make sure db is closed.
-    close();
-
-    // There is no destroydb java API available,
-    // equivalently we can delete all db directories.
-    deleteQuietly(dbLocation);
-    deleteQuietly(new File(dbOptions.dbLogDir()));
-    deleteQuietly(new File(dbOptions.walDir()));
-    List<DbPath> dbPaths = dbOptions.dbPaths();
-    if (dbPaths != null) {
-      dbPaths.forEach(dbPath -> {
-        deleteQuietly(new File(dbPath.toString()));
-      });
-    }
-  }
-
-  @Override
-  public ImmutablePair<byte[], byte[]> peekAround(int offset,
-      byte[] from) throws IOException, IllegalArgumentException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (from == null) {
-        it.seekToFirst();
-      } else {
-        it.seek(from);
-      }
-      if (!it.isValid()) {
-        return null;
-      }
-
-      switch (offset) {
-      case 0:
-        break;
-      case 1:
-        it.next();
-        break;
-      case -1:
-        it.prev();
-        break;
-      default:
-        throw new IllegalArgumentException(
-            "Position can only be -1, 0 " + "or 1, but found " + offset);
-      }
-      return it.isValid() ? new ImmutablePair<>(it.key(), it.value()) : null;
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (from != null) {
-        it.seek(from);
-      } else {
-        it.seekToFirst();
-      }
-      while (it.isValid()) {
-        if (!consumer.consume(it.key(), it.value())) {
-          break;
-        }
-        it.next();
-      }
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (statMBeanName != null) {
-      MBeans.unregister(statMBeanName);
-      statMBeanName = null;
-    }
-    if (db != null) {
-      db.close();
-    }
-
-  }
-
-  @VisibleForTesting
-  protected ObjectName getStatMBeanName() {
-    return statMBeanName;
-  }
-
-  @Override
-  public MetaStoreIterator<KeyValue> iterator() {
-    return new RocksDBStoreIterator(db.newIterator());
-  }
-
-}
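
A hedged usage sketch of the class above: basic put/get/delete against a store opened on a hypothetical path.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.utils.RocksDBStore;
import org.rocksdb.Options;

public class RocksDBStoreExample {
  public static void main(String[] args) throws IOException {
    Options opts = new Options().setCreateIfMissing(true);
    RocksDBStore store = new RocksDBStore(new File("/tmp/demo.db"), opts);
    try {
      byte[] key = "key1".getBytes(StandardCharsets.UTF_8);
      store.put(key, "value1".getBytes(StandardCharsets.UTF_8));
      byte[] value = store.get(key);   // null when the key is absent
      System.out.println(value == null
          ? "missing" : new String(value, StandardCharsets.UTF_8));
      store.delete(key);
    } finally {
      store.close();                   // also unregisters the JMX bean
    }
  }
}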

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java
deleted file mode 100644
index 6e9b695..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-package org.apache.hadoop.utils;
-
-import org.rocksdb.RocksIterator;
-
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.utils.MetadataStore.KeyValue;
-
-/**
- * RocksDB store iterator.
- */
-public class RocksDBStoreIterator implements MetaStoreIterator<KeyValue> {
-
-  private RocksIterator rocksDBIterator;
-
-  public RocksDBStoreIterator(RocksIterator iterator) {
-    this.rocksDBIterator = iterator;
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public boolean hasNext() {
-    return rocksDBIterator.isValid();
-  }
-
-  @Override
-  public KeyValue next() {
-    if (rocksDBIterator.isValid()) {
-      KeyValue value = KeyValue.create(rocksDBIterator.key(), rocksDBIterator
-          .value());
-      rocksDBIterator.next();
-      return value;
-    }
-    throw new NoSuchElementException("RocksDB Store has no more elements");
-  }
-
-  @Override
-  public void seekToFirst() {
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    rocksDBIterator.seekToLast();
-  }
-
-}
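
A short sketch of driving the iterator above directly over a RocksDB handle opened elsewhere; the getKey()/getValue() byte[] accessors on MetadataStore.KeyValue are assumed.

import org.apache.hadoop.utils.MetadataStore.KeyValue;
import org.apache.hadoop.utils.RocksDBStoreIterator;
import org.rocksdb.RocksDB;

public final class IteratorSketch {
  private IteratorSketch() { }

  // Walks every entry from the first key and prints the sizes.
  public static void dumpAll(RocksDB db) {
    RocksDBStoreIterator it = new RocksDBStoreIterator(db.newIterator());
    while (it.hasNext()) {
      KeyValue kv = it.next();
      System.out.println(kv.getKey().length + " byte key / "
          + kv.getValue().length + " byte value");
    }
  }
}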

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
deleted file mode 100644
index 88c093e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.rocksdb.HistogramData;
-import org.rocksdb.HistogramType;
-import org.rocksdb.Statistics;
-import org.rocksdb.TickerType;
-
-import javax.management.Attribute;
-import javax.management.AttributeList;
-import javax.management.AttributeNotFoundException;
-import javax.management.DynamicMBean;
-import javax.management.InvalidAttributeValueException;
-import javax.management.MBeanAttributeInfo;
-import javax.management.MBeanException;
-import javax.management.MBeanInfo;
-import javax.management.ReflectionException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Adapter JMX bean to publish all the Rocksdb metrics.
- */
-public class RocksDBStoreMBean implements DynamicMBean {
-
-  private Statistics statistics;
-
-  private Set<String> histogramAttributes = new HashSet<>();
-
-  public RocksDBStoreMBean(Statistics statistics) {
-    this.statistics = statistics;
-    histogramAttributes.add("Average");
-    histogramAttributes.add("Median");
-    histogramAttributes.add("Percentile95");
-    histogramAttributes.add("Percentile99");
-    histogramAttributes.add("StandardDeviation");
-  }
-
-  @Override
-  public Object getAttribute(String attribute)
-      throws AttributeNotFoundException, MBeanException, ReflectionException {
-    for (String histogramAttribute : histogramAttributes) {
-      if (attribute.endsWith("_" + histogramAttribute.toUpperCase())) {
-        String keyName = attribute
-            .substring(0, attribute.length() - histogramAttribute.length() - 1);
-        try {
-          HistogramData histogram =
-              statistics.getHistogramData(HistogramType.valueOf(keyName));
-          try {
-            Method method =
-                HistogramData.class.getMethod("get" + histogramAttribute);
-            return method.invoke(histogram);
-          } catch (Exception e) {
-            throw new ReflectionException(e,
-                "Can't read attribute " + attribute);
-          }
-        } catch (IllegalArgumentException exception) {
-          throw new AttributeNotFoundException(
-              "No such attribute in RocksDB stats: " + attribute);
-        }
-      }
-    }
-    try {
-      return statistics.getTickerCount(TickerType.valueOf(attribute));
-    } catch (IllegalArgumentException ex) {
-      throw new AttributeNotFoundException(
-          "No such attribute in RocksDB stats: " + attribute);
-    }
-  }
-
-  @Override
-  public void setAttribute(Attribute attribute)
-      throws AttributeNotFoundException, InvalidAttributeValueException,
-      MBeanException, ReflectionException {
-
-  }
-
-  @Override
-  public AttributeList getAttributes(String[] attributes) {
-    AttributeList result = new AttributeList();
-    for (String attributeName : attributes) {
-      try {
-        Object value = getAttribute(attributeName);
-        result.add(value);
-      } catch (Exception e) {
-        //TODO
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public AttributeList setAttributes(AttributeList attributes) {
-    return null;
-  }
-
-  @Override
-  public Object invoke(String actionName, Object[] params, String[] signature)
-      throws MBeanException, ReflectionException {
-    return null;
-  }
-
-  @Override
-  public MBeanInfo getMBeanInfo() {
-
-    List<MBeanAttributeInfo> attributes = new ArrayList<>();
-    for (TickerType tickerType : TickerType.values()) {
-      attributes.add(new MBeanAttributeInfo(tickerType.name(), "long",
-          "RocksDBStat: " + tickerType.name(), true, false, false));
-    }
-    for (HistogramType histogramType : HistogramType.values()) {
-      for (String histogramAttribute : histogramAttributes) {
-        attributes.add(new MBeanAttributeInfo(
-            histogramType.name() + "_" + histogramAttribute.toUpperCase(),
-            "long", "RocksDBStat: " + histogramType.name(), true, false,
-            false));
-      }
-    }
-
-    return new MBeanInfo("", "RocksDBStat",
-        attributes.toArray(new MBeanAttributeInfo[0]), null, null, null);
-
-  }
-}
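
A sketch of the attribute naming the bean implements: ticker counters are addressed by their TickerType name, histograms as the HistogramType name plus an upper-cased attribute suffix. BYTES_WRITTEN and DB_GET are standard RocksDB ticker/histogram names used here for illustration.

import org.apache.hadoop.utils.RocksDBStoreMBean;
import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;

public class RocksDBStoreMBeanSketch {
  public static void main(String[] args) throws Exception {
    RocksDB.loadLibrary();                  // Statistics needs the native library
    Statistics statistics = new Statistics();
    RocksDBStoreMBean bean = new RocksDBStoreMBean(statistics);

    // Ticker counter, resolved via TickerType.valueOf("BYTES_WRITTEN").
    Object bytesWritten = bean.getAttribute("BYTES_WRITTEN");
    // Histogram median, resolved to HistogramType.DB_GET + getMedian().
    Object getMedian = bean.getAttribute("DB_GET_MEDIAN");

    System.out.println(bytesWritten + " / " + getMedian);
  }
}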

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
deleted file mode 100644
index 94370b1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import com.google.common.base.Preconditions;
-import org.eclipse.jetty.util.StringUtil;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.DBOptions;
-import org.rocksdb.Env;
-import org.rocksdb.OptionsUtil;
-import org.rocksdb.RocksDBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.List;
-
-/**
- * A Class that controls the standard config options of RocksDB.
- * <p>
- * Important : Some of the functions in this file are magic functions designed
- * for the use of OZONE developers only. For that reason this information is
- * documented in this file only and is *not* intended for end user consumption.
- * Please do not use this information to tune your production environments.
- * Please remember the Spider-Man principle; with great power comes great
- * responsibility.
- */
-public final class DBConfigFromFile {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DBConfigFromFile.class);
-
-  public static final String CONFIG_DIR = "HADOOP_CONF_DIR";
-
-  private DBConfigFromFile() {
-  }
-
-  public static File getConfigLocation() throws IOException {
-    String path = System.getenv(CONFIG_DIR);
-
-    // Make testing easy.
-    // If there is No Env. defined, let us try to read the JVM property
-    if (StringUtil.isBlank(path)) {
-      path = System.getProperty(CONFIG_DIR);
-    }
-
-    if (StringUtil.isBlank(path)) {
-      LOG.debug("Unable to find the configuration directory. "
-          + "Please make sure that HADOOP_CONF_DIR is setup correctly.");
-    }
-    if(StringUtil.isBlank(path)){
-      return null;
-    }
-    return new File(path);
-
-  }
-
-  /**
-   * This class establishes a magic pattern where we look for DBFile.ini as the
-   * options for RocksDB.
-   *
-   * @param dbFileName - The DBFile Name. For example, OzoneManager.db
-   * @return Name of the DB File options
-   */
-  public static String getOptionsFileNameFromDB(String dbFileName) {
-    Preconditions.checkNotNull(dbFileName);
-    return dbFileName + ".ini";
-  }
-
-  /**
-   * One of the Magic functions designed for the use of Ozone Developers *ONLY*.
-   * This function takes the name of a DB file and looks up a .ini file that
-   * follows the RocksDB config format and uses that file for DBOptions and
-   * Column family Options. The format for this file is specified by RocksDB.
-   * <p>
-   * Here is a sample config from RocksDB sample Repo.
-   * <p>
-   * https://github.com/facebook/rocksdb/blob/master/examples
-   * /rocksdb_option_file_example.ini
-   * <p>
-   * We look for a specific pattern, say OzoneManager.db will have its configs
-   * specified in OzoneManager.db.ini. This option is used only by the
-   * performance testing group to allow tuning of all parameters freely.
-   * <p>
-   * For end users we offer a set of predefined options that are easy to use,
-   * so the user does not need to become an expert in RocksDB config.
-   * <p>
-   * This code assumes the .ini file is placed in the same directory as normal
-   * config files. That is in $HADOOP_DIR/etc/hadoop. For example, if we want to
-   * control OzoneManager.db configs from a file, we need to create a file
-   * called OzoneManager.db.ini and place that file in $HADOOP_DIR/etc/hadoop.
-   *
-   * @param dbFileName - The DB File Name, for example, OzoneManager.db.
-   * @param cfDescs - ColumnFamily Handles.
-   * @return DBOptions, Options to be used for opening/creating the DB.
-   * @throws IOException
-   */
-  public static DBOptions readFromFile(String dbFileName,
-      List<ColumnFamilyDescriptor> cfDescs) throws IOException {
-    Preconditions.checkNotNull(dbFileName);
-    Preconditions.checkNotNull(cfDescs);
-    Preconditions.checkArgument(cfDescs.size() > 0);
-
-    //TODO: Add Documentation on how to support RocksDB Mem Env.
-    Env env = Env.getDefault();
-    DBOptions options = null;
-    File configLocation = getConfigLocation();
-    if(configLocation != null &&
-        StringUtil.isNotBlank(configLocation.toString())){
-      Path optionsFile = Paths.get(configLocation.toString(),
-          getOptionsFileNameFromDB(dbFileName));
-
-      if (optionsFile.toFile().exists()) {
-        options = new DBOptions();
-        try {
-          OptionsUtil.loadOptionsFromFile(optionsFile.toString(),
-              env, options, cfDescs, true);
-
-        } catch (RocksDBException rdEx) {
-          throw RDBTable.toIOException("Unable to find/open Options file.",
-              rdEx);
-        }
-      }
-    }
-    return options;
-  }
-
-}
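
A sketch of the lookup convention implemented above: a store named OzoneManager.db is tuned by OzoneManager.db.ini placed in the directory named by HADOOP_CONF_DIR (or the JVM property of the same name, which the test hook reads). The /tmp/conf path is a placeholder.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.utils.db.DBConfigFromFile;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public class DBConfigFromFileSketch {
  public static void main(String[] args) throws Exception {
    System.setProperty(DBConfigFromFile.CONFIG_DIR, "/tmp/conf"); // placeholder dir
    List<ColumnFamilyDescriptor> families = new ArrayList<>();
    families.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));

    // Returns null when /tmp/conf/OzoneManager.db.ini does not exist.
    DBOptions options =
        DBConfigFromFile.readFromFile("OzoneManager.db", families);
    System.out.println(options == null
        ? "no ini found, use defaults" : "loaded from ini");
  }
}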

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBProfile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBProfile.java
deleted file mode 100644
index 4d3d6bc..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBProfile.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.rocksdb.BlockBasedTableConfig;
-import org.rocksdb.BloomFilter;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.CompactionStyle;
-import org.rocksdb.DBOptions;
-
-import java.math.BigDecimal;
-
-/**
- * User-visible configs based on the RocksDB tuning page. Documentation for Options:
- * <p>
- * https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h
- * <p>
- * Most tuning parameters are based on this URL.
- * <p>
- * https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
- */
-public enum DBProfile {
-  //TODO : Add more profiles like TEST etc.
-  SSD {
-    @Override
-    public String toString() {
-      return "DBProfile.SSD";
-    }
-
-    @Override
-    public ColumnFamilyOptions getColumnFamilyOptions() {
-
-      // Set BlockCacheSize to 256 MB. This should not be an issue for HADOOP.
-      final long blockCacheSize = toLong(StorageUnit.MB.toBytes(256.00));
-
-      // Set the Default block size to 16KB
-      final long blockSize = toLong(StorageUnit.KB.toBytes(16));
-
-      // Write Buffer Size -- set to 128 MB
-      final long writeBufferSize = toLong(StorageUnit.MB.toBytes(128));
-
-      return new ColumnFamilyOptions()
-          .setLevelCompactionDynamicLevelBytes(true)
-          .setWriteBufferSize(writeBufferSize)
-          .setTableFormatConfig(
-              new BlockBasedTableConfig()
-                  .setBlockCacheSize(blockCacheSize)
-                  .setBlockSize(blockSize)
-                  .setCacheIndexAndFilterBlocks(true)
-                  .setPinL0FilterAndIndexBlocksInCache(true)
-                  .setFilter(new BloomFilter()));
-    }
-
-    @Override
-    public DBOptions getDBOptions() {
-      final int maxBackgroundCompactions = 4;
-      final int maxBackgroundFlushes = 2;
-      final long bytesPerSync = toLong(StorageUnit.MB.toBytes(1.00));
-      final boolean createIfMissing = true;
-      final boolean createMissingColumnFamilies = true;
-      return new DBOptions()
-          .setIncreaseParallelism(Runtime.getRuntime().availableProcessors())
-          .setMaxBackgroundCompactions(maxBackgroundCompactions)
-          .setMaxBackgroundFlushes(maxBackgroundFlushes)
-          .setBytesPerSync(bytesPerSync)
-          .setCreateIfMissing(createIfMissing)
-          .setCreateMissingColumnFamilies(createMissingColumnFamilies);
-    }
-
-
-  },
-  DISK {
-    @Override
-    public String toString() {
-      return "DBProfile.DISK";
-    }
-
-    @Override
-    public DBOptions getDBOptions() {
-      final long readAheadSize = toLong(StorageUnit.MB.toBytes(4.00));
-      return SSD.getDBOptions().setCompactionReadaheadSize(readAheadSize);
-    }
-
-    @Override
-    public ColumnFamilyOptions getColumnFamilyOptions() {
-      ColumnFamilyOptions columnFamilyOptions = SSD.getColumnFamilyOptions();
-      columnFamilyOptions.setCompactionStyle(CompactionStyle.LEVEL);
-      return columnFamilyOptions;
-    }
-
-
-  };
-
-  private static long toLong(double value) {
-    BigDecimal temp = new BigDecimal(value);
-    return temp.longValue();
-  }
-
-  public abstract DBOptions getDBOptions();
-
-  public abstract ColumnFamilyOptions getColumnFamilyOptions();
-}
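
A sketch of consuming a profile, the same way DBStoreBuilder further down in this commit pulls pre-tuned options out of it.

import org.apache.hadoop.utils.db.DBProfile;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;

public class DBProfileSketch {
  public static void main(String[] args) {
    DBProfile profile = DBProfile.SSD;               // or DBProfile.DISK
    DBOptions dbOptions = profile.getDBOptions();
    ColumnFamilyOptions cfOptions = profile.getColumnFamilyOptions();

    System.out.println(profile + ": writeBufferSize="
        + cfOptions.writeBufferSize());              // 128 MB for the SSD profile
    cfOptions.close();
    dbOptions.close();
  }
}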

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
deleted file mode 100644
index 6947a83..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import org.apache.hadoop.classification.InterfaceStability;
-import org.rocksdb.WriteBatch;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-/**
- * The DBStore interface provides the ability to create Tables, which store
- * a specific type of Key-Value pair. Some DB interfaces like LevelDB will not
- * be able to do this. In those cases a Table creation will map to the default
- * store.
- *
- */
-@InterfaceStability.Evolving
-public interface DBStore extends AutoCloseable {
-
-  /**
-   * Gets an existing TableStore.
-   *
-   * @param name - Name of the TableStore to get
-   * @return - TableStore.
-   * @throws IOException on Failure
-   */
-  Table getTable(String name) throws IOException;
-
-  /**
-   * Lists the Known list of Tables in a DB.
-   *
-   * @return List of Tables; in the case of RocksDB and LevelDB we will return
-   * least one entry called DEFAULT.
-   * @throws IOException on Failure
-   */
-  ArrayList<Table> listTables() throws IOException;
-
-  /**
-   * Compact the entire database.
-   *
-   * @throws IOException on Failure
-   */
-  void compactDB() throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table.
-   *
-   * @param key - Key to move.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  void move(byte[] key, Table source, Table dest) throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table and updates the
-   * destination to the new value.
-   *
-   * @param key - Key to move.
-   * @param value - new value to write to the destination table.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  void move(byte[] key, byte[] value, Table source, Table dest)
-      throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table and updates the
-   * destination with the new key name and value.
-   * This is similar to deleting an entry in one table and adding an entry in
-   * another table, here it is done atomically.
-   *
-   * @param sourceKey - Key to move.
-   * @param destKey - Destination key name.
-   * @param value - new value to write to the destination table.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  void move(byte[] sourceKey, byte[] destKey, byte[] value,
-            Table source, Table dest) throws IOException;
-
-  /**
-   * Returns an estimated count of keys in this DB.
-   *
-   * @return long, estimate of keys in the DB.
-   */
-  long getEstimatedKeyCount() throws IOException;
-
-  /**
-   * Writes a transaction into the DB using the default write Options.
-   * @param batch - Batch to write.
-   */
-  void write(WriteBatch batch) throws IOException;
-
-}
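
A sketch against the interface alone: moving a key between two tables atomically, assuming a store and the (hypothetical) table names were created elsewhere, e.g. via DBStoreBuilder below.

import java.io.IOException;

import org.apache.hadoop.utils.db.DBStore;
import org.apache.hadoop.utils.db.Table;

public final class DBStoreUsageSketch {
  private DBStoreUsageSketch() { }

  public static void promote(DBStore store, byte[] key) throws IOException {
    Table staging = store.getTable("staging");      // hypothetical table names
    Table committed = store.getTable("committed");
    store.move(key, staging, committed);            // delete + put in one batch
    System.out.println("estimated keys: " + store.getEstimatedKeyCount());
  }
}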

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java
deleted file mode 100644
index 5ba9b9c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.eclipse.jetty.util.StringUtil;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_DB_PROFILE;
-
-/**
- * DBStore Builder.
- */
-public final class DBStoreBuilder {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DBStoreBuilder.class);
-  private Set<TableConfig> tables;
-  private DBProfile dbProfile;
-  private DBOptions rocksDBOption;
-  private String dbname;
-  private Path dbPath;
-  private List<String> tableNames;
-  private Configuration configuration;
-
-  private DBStoreBuilder(Configuration configuration) {
-    tables = new HashSet<>();
-    tableNames = new LinkedList<>();
-    this.configuration = configuration;
-  }
-
-  public static DBStoreBuilder newBuilder(Configuration configuration) {
-    return new DBStoreBuilder(configuration);
-  }
-
-  public DBStoreBuilder setProfile(DBProfile profile) {
-    dbProfile = profile;
-    return this;
-  }
-
-  public DBStoreBuilder setName(String name) {
-    dbname = name;
-    return this;
-  }
-
-  public DBStoreBuilder addTable(String tableName) {
-    tableNames.add(tableName);
-    return this;
-  }
-
-  public DBStoreBuilder addTable(String tableName, ColumnFamilyOptions option)
-      throws IOException {
-    TableConfig tableConfig = new TableConfig(tableName, option);
-    if (!tables.add(tableConfig)) {
-      String message = "Unable to add the table: " + tableName +
-          ".  Please check if this table name is already in use.";
-      LOG.error(message);
-      throw new IOException(message);
-    }
-    LOG.info("using custom profile for table: {}", tableName);
-    return this;
-  }
-
-  public DBStoreBuilder setDBOption(DBOptions option) {
-    rocksDBOption = option;
-    return this;
-  }
-
-  public DBStoreBuilder setPath(Path path) {
-    Preconditions.checkNotNull(path);
-    dbPath = path;
-    return this;
-  }
-
-  /**
-   * Builds a DBStore instance and returns that.
-   *
-   * @return DBStore
-   */
-  public DBStore build() throws IOException {
-    if(StringUtil.isBlank(dbname) || (dbPath == null)) {
-      LOG.error("Required Parameter missing.");
-      throw new IOException("Required parameter is missing. Please make sure "
-          + "sure Path and DB name is provided.");
-    }
-    processDBProfile();
-    processTables();
-    DBOptions options = getDbProfile();
-    File dbFile = getDBFile();
-    if (!dbFile.getParentFile().exists()) {
-      throw new IOException("The DB destination directory should exist.");
-    }
-    return new RDBStore(dbFile, options, tables);
-  }
-
-  /**
-   * If the DBProfile is not set, we default to the profile specified in the
-   * config file.
-   */
-  private void processDBProfile() {
-    if (dbProfile == null) {
-      dbProfile = this.configuration.getEnum(HDDS_DB_PROFILE,
-          HDDS_DEFAULT_DB_PROFILE);
-    }
-  }
-
-  private void processTables() throws IOException {
-    if (tableNames.size() > 0) {
-      for (String name : tableNames) {
-        addTable(name, dbProfile.getColumnFamilyOptions());
-        LOG.info("Using default column profile:{} for Table:{}",
-            dbProfile.toString(), name);
-      }
-    }
-    addTable(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-        dbProfile.getColumnFamilyOptions());
-    LOG.info("Using default column profile:{} for Table:{}",
-        dbProfile.toString(),
-        DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY));
-  }
-
-  private DBOptions getDbProfile() {
-    if (rocksDBOption != null) {
-      return rocksDBOption;
-    }
-    DBOptions option = null;
-    if (StringUtil.isNotBlank(dbname)) {
-      List<ColumnFamilyDescriptor> columnFamilyDescriptors = new LinkedList<>();
-
-      for (TableConfig tc : tables) {
-        columnFamilyDescriptors.add(tc.getDescriptor());
-      }
-
-      if (columnFamilyDescriptors.size() > 0) {
-        try {
-          option = DBConfigFromFile.readFromFile(dbname,
-              columnFamilyDescriptors);
-          if(option != null) {
-            LOG.info("Using Configs from {}.ini file", dbname);
-          }
-        } catch (IOException ex) {
-          LOG.info("Unable to read ROCKDB config", ex);
-        }
-      }
-    }
-
-    if (option == null) {
-      LOG.info("Using default options. {}", dbProfile.toString());
-      return dbProfile.getDBOptions();
-    }
-    return option;
-  }
-
-  private File getDBFile() throws IOException {
-    if (dbPath == null) {
-      LOG.error("DB path is required.");
-      throw new IOException("A Path to for DB file is needed.");
-    }
-
-    if (StringUtil.isBlank(dbname)) {
-      LOG.error("DBName is a required.");
-      throw new IOException("A valid DB name is required.");
-    }
-    return Paths.get(dbPath.toString(), dbname).toFile();
-  }
-
-}
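
A sketch of the builder flow implemented above; the path and table names are illustrative, and the parent directory must already exist or build() refuses to proceed.

import java.nio.file.Paths;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.utils.db.DBStore;
import org.apache.hadoop.utils.db.DBStoreBuilder;

public class DBStoreBuilderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DBStore store = DBStoreBuilder.newBuilder(conf)
        .setName("OzoneManager.db")
        .setPath(Paths.get("/tmp/metadata"))   // parent dir must exist
        .addTable("keyTable")                   // hypothetical table names
        .addTable("bucketTable")
        .build();       // profile defaults to HDDS_DB_PROFILE from the config
    store.close();
  }
}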

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
deleted file mode 100644
index 5078b3e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.utils.RocksDBStoreMBean;
-import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyHandle;
-
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Hashtable;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * RocksDB Store that supports creating Tables in DB.
- */
-public class RDBStore implements DBStore {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RDBStore.class);
-  private final RocksDB db;
-  private final File dbLocation;
-  private final WriteOptions writeOptions;
-  private final DBOptions dbOptions;
-  private final Hashtable<String, ColumnFamilyHandle> handleTable;
-  private ObjectName statMBeanName;
-
-  public RDBStore(File dbFile, DBOptions options, Set<TableConfig> families)
-      throws IOException {
-    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
-    Preconditions.checkNotNull(families);
-    Preconditions.checkArgument(families.size() > 0);
-    handleTable = new Hashtable<>();
-
-    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
-        new ArrayList<>();
-    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
-
-    for (TableConfig family : families) {
-      columnFamilyDescriptors.add(family.getDescriptor());
-    }
-
-    dbOptions = options;
-    dbLocation = dbFile;
-    // TODO: Read from the next Config.
-    writeOptions = new WriteOptions();
-
-    try {
-      db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(),
-          columnFamilyDescriptors, columnFamilyHandles);
-
-      for (int x = 0; x < columnFamilyHandles.size(); x++) {
-        handleTable.put(
-            DFSUtil.bytes2String(columnFamilyHandles.get(x).getName()),
-            columnFamilyHandles.get(x));
-      }
-
-      if (dbOptions.statistics() != null) {
-        Map<String, String> jmxProperties = new HashMap<>();
-        jmxProperties.put("dbName", dbFile.getName());
-        statMBeanName = MBeans.register("Ozone", "RocksDbStore", jmxProperties,
-            new RocksDBStoreMBean(dbOptions.statistics()));
-        if (statMBeanName == null) {
-          LOG.warn("jmx registration failed during RocksDB init, db path :{}",
-              dbFile.getAbsolutePath());
-        }
-      }
-
-    } catch (RocksDBException e) {
-      throw toIOException(
-          "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e);
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("RocksDB successfully opened.");
-      LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
-      LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
-      LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
-    }
-  }
-
-  public static IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
-  @Override
-  public void compactDB() throws IOException {
-    if (db != null) {
-      try {
-        db.compactRange();
-      } catch (RocksDBException e) {
-        throw toIOException("Failed to compact db", e);
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-
-    for (final ColumnFamilyHandle handle : handleTable.values()) {
-      handle.close();
-    }
-
-    if (statMBeanName != null) {
-      MBeans.unregister(statMBeanName);
-      statMBeanName = null;
-    }
-
-    if (db != null) {
-      db.close();
-    }
-
-    if (dbOptions != null) {
-      dbOptions.close();
-    }
-
-    if (writeOptions != null) {
-      writeOptions.close();
-    }
-  }
-
-  @Override
-  public void move(byte[] key, Table source, Table dest) throws IOException {
-    RDBTable sourceTable;
-    RDBTable destTable;
-    if (source instanceof RDBTable) {
-      sourceTable = (RDBTable) source;
-    } else {
-      LOG.error("Unexpected Table type. Expected RocksTable Store for Source.");
-      throw new IOException("Unexpected TableStore Type in source. Expected "
-          + "RocksDBTable.");
-    }
-
-    if (dest instanceof RDBTable) {
-      destTable = (RDBTable) dest;
-    } else {
-      LOG.error("Unexpected Table type. Expected RocksTable Store for Dest.");
-      throw new IOException("Unexpected TableStore Type in dest. Expected "
-          + "RocksDBTable.");
-    }
-    try (WriteBatch batch = new WriteBatch()) {
-      byte[] value = sourceTable.get(key);
-      batch.put(destTable.getHandle(), key, value);
-      batch.delete(sourceTable.getHandle(), key);
-      db.write(writeOptions, batch);
-    } catch (RocksDBException rockdbException) {
-      LOG.error("Move of key failed. Key:{}", DFSUtil.bytes2String(key));
-      throw toIOException("Unable to move key: " + DFSUtil.bytes2String(key),
-          rockdbException);
-    }
-  }
-
-
-  @Override
-  public void move(byte[] key, byte[] value, Table source,
-      Table dest) throws IOException {
-    move(key, key, value, source, dest);
-  }
-
-  @Override
-  public void move(byte[] sourceKey, byte[] destKey, byte[] value, Table source,
-      Table dest) throws IOException {
-    RDBTable sourceTable;
-    RDBTable destTable;
-    if (source instanceof RDBTable) {
-      sourceTable = (RDBTable) source;
-    } else {
-      LOG.error("Unexpected Table type. Expected RocksTable Store for Source.");
-      throw new IOException("Unexpected TableStore Type in source. Expected "
-          + "RocksDBTable.");
-    }
-
-    if (dest instanceof RDBTable) {
-      destTable = (RDBTable) dest;
-    } else {
-      LOG.error("Unexpected Table type. Expected RocksTable Store for Dest.");
-      throw new IOException("Unexpected TableStore Type in dest. Expected "
-          + "RocksDBTable.");
-    }
-    try (WriteBatch batch = new WriteBatch()) {
-      batch.put(destTable.getHandle(), destKey, value);
-      batch.delete(sourceTable.getHandle(), sourceKey);
-      db.write(writeOptions, batch);
-    } catch (RocksDBException rockdbException) {
-      LOG.error("Move of key failed. Key:{}", DFSUtil.bytes2String(sourceKey));
-      throw toIOException("Unable to move key: " +
-              DFSUtil.bytes2String(sourceKey), rockdbException);
-    }
-  }
-
-  @Override
-  public long getEstimatedKeyCount() throws IOException {
-    try {
-      return db.getLongProperty("rocksdb.estimate-num-keys");
-    } catch (RocksDBException e) {
-      throw toIOException("Unable to get the estimated count.", e);
-    }
-  }
-
-  @Override
-  public void write(WriteBatch batch) throws IOException {
-    try {
-      db.write(writeOptions, batch);
-    } catch (RocksDBException e) {
-      throw toIOException("Unable to write the batch.", e);
-    }
-  }
-
-  @VisibleForTesting
-  protected ObjectName getStatMBeanName() {
-    return statMBeanName;
-  }
-
-  @Override
-  public Table getTable(String name) throws IOException {
-    ColumnFamilyHandle handle = handleTable.get(name);
-    if (handle == null) {
-      throw new IOException("No such table in this DB. TableName : " + name);
-    }
-    return new RDBTable(this.db, handle, this.writeOptions);
-  }
-
-  @Override
-  public ArrayList<Table> listTables() throws IOException {
-    ArrayList<Table> returnList = new ArrayList<>();
-    for (ColumnFamilyHandle handle : handleTable.values()) {
-      returnList.add(new RDBTable(db, handle, writeOptions));
-    }
-    return returnList;
-  }
-}
\ No newline at end of file
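
For context, a minimal usage sketch of the store API removed above. The store variable, the table names, and the enclosing method (assumed to declare throws IOException) are illustrative and not part of the commit.

// Sketch only: moving a key between two tables of an already-open store.
Table users = store.getTable("users");
Table archived = store.getTable("archived");

byte[] key = DFSUtil.string2Bytes("volume1/bucket1/key1");
// Atomically puts the value into "archived" and deletes it from "users".
store.move(key, users, archived);

// Enumerate every column family exposed as a Table.
for (Table t : store.listTables()) {
  System.out.println("table: " + t.getName());
}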

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStoreIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStoreIterator.java
deleted file mode 100644
index f1f2df6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStoreIterator.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import org.apache.hadoop.utils.db.Table.KeyValue;
-import org.rocksdb.RocksIterator;
-
-import java.io.IOException;
-import java.util.NoSuchElementException;
-import java.util.function.Consumer;
-
-/**
- * RocksDB store iterator.
- */
-public class RDBStoreIterator implements TableIterator<KeyValue> {
-
-  private RocksIterator rocksDBIterator;
-
-  public RDBStoreIterator(RocksIterator iterator) {
-    this.rocksDBIterator = iterator;
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void forEachRemaining(Consumer<? super KeyValue> action) {
-    while (hasNext()) {
-      action.accept(next());
-    }
-  }
-
-  @Override
-  public boolean hasNext() {
-    return rocksDBIterator.isValid();
-  }
-
-  @Override
-  public Table.KeyValue next() {
-    if (rocksDBIterator.isValid()) {
-      KeyValue value = KeyValue.create(rocksDBIterator.key(), rocksDBIterator
-          .value());
-      rocksDBIterator.next();
-      return value;
-    }
-    throw new NoSuchElementException("RocksDB Store has no more elements");
-  }
-
-  @Override
-  public void seekToFirst() {
-    rocksDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    rocksDBIterator.seekToLast();
-  }
-
-  @Override
-  public KeyValue seek(byte[] key) {
-    rocksDBIterator.seek(key);
-    if (rocksDBIterator.isValid()) {
-      return KeyValue.create(rocksDBIterator.key(),
-          rocksDBIterator.value());
-    }
-    return null;
-  }
-
-  @Override
-  public void close() throws IOException {
-    rocksDBIterator.close();
-  }
-}
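
A minimal iteration sketch for the iterator removed above; the table variable is assumed to be an RDBTable obtained from the store, and the enclosing method is assumed to declare throws IOException.

// Sketch only: walk every entry of one table.
try (TableIterator<Table.KeyValue> iter = table.iterator()) {
  iter.seekToFirst();
  while (iter.hasNext()) {
    Table.KeyValue kv = iter.next();
    System.out.println(DFSUtil.bytes2String(kv.getKey())
        + " -> " + kv.getValue().length + " bytes");
  }
}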

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
deleted file mode 100644
index 8cf6b35..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.ReadOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-
-/**
- * RocksDB-backed Table implementation for the Ozone metadata store.
- */
-public class RDBTable implements Table {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RDBTable.class);
-
-  private final RocksDB db;
-  private final ColumnFamilyHandle handle;
-  private final WriteOptions writeOptions;
-
-  /**
-   * Constructs an RDBTable.
-   *
-   * @param db - RocksDB instance that we are using.
-   * @param handle - ColumnFamily Handle.
-   * @param writeOptions - RocksDB write Options.
-   */
-  public RDBTable(RocksDB db, ColumnFamilyHandle handle,
-      WriteOptions writeOptions) {
-    this.db = db;
-    this.handle = handle;
-    this.writeOptions = writeOptions;
-  }
-
-  /**
-   * Converts RocksDB exception to IOE.
-   * @param msg  - Message to add to exception.
-   * @param e - Original Exception.
-   * @return  IOE.
-   */
-  public static IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
-  /**
-   * Returns the Column family Handle.
-   *
-   * @return ColumnFamilyHandle.
-   */
-  @Override
-  public ColumnFamilyHandle getHandle() {
-    return handle;
-  }
-
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
-    try {
-      db.put(handle, writeOptions, key, value);
-    } catch (RocksDBException e) {
-      LOG.error("Failed to write to DB. Key: {}", new String(key,
-          StandardCharsets.UTF_8));
-      throw toIOException("Failed to put key-value to metadata "
-          + "store", e);
-    }
-  }
-
-  @Override
-  public boolean isEmpty() throws IOException {
-    try (TableIterator<KeyValue> keyIter = iterator()) {
-      keyIter.seekToFirst();
-      return !keyIter.hasNext();
-    }
-  }
-
-  @Override
-  public byte[] get(byte[] key) throws IOException {
-    try {
-      return db.get(handle, key);
-    } catch (RocksDBException e) {
-      throw toIOException(
-          "Failed to get the value for the given key", e);
-    }
-  }
-
-  @Override
-  public void delete(byte[] key) throws IOException {
-    try {
-      db.delete(handle, key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to delete the given key", e);
-    }
-  }
-
-  @Override
-  public void writeBatch(WriteBatch operation) throws IOException {
-    try {
-      db.write(writeOptions, operation);
-    } catch (RocksDBException e) {
-      throw toIOException("Batch write operation failed", e);
-    }
-  }
-
-//  @Override
-//  public void iterate(byte[] from, EntryConsumer consumer)
-//      throws IOException {
-//
-//    try (RocksIterator it = db.newIterator(handle)) {
-//      if (from != null) {
-//        it.seek(from);
-//      } else {
-//        it.seekToFirst();
-//      }
-//      while (it.isValid()) {
-//        if (!consumer.consume(it.key(), it.value())) {
-//          break;
-//        }
-//        it.next();
-//      }
-//    }
-//  }
-
-  @Override
-  public TableIterator<KeyValue> iterator() {
-    ReadOptions readOptions = new ReadOptions();
-    return new RDBStoreIterator(db.newIterator(handle, readOptions));
-  }
-
-  @Override
-  public String getName() throws IOException {
-    try {
-      return DFSUtil.bytes2String(this.getHandle().getName());
-    } catch (RocksDBException rdbEx) {
-      throw toIOException("Unable to get the table name.", rdbEx);
-    }
-  }
-
-  @Override
-  public void close() throws Exception {
-    // Nothing to do for a Column Family.
-  }
-}
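
A minimal sketch of the basic RDBTable operations removed above; the table variable and the key/value strings are illustrative, and IOException handling is left to the caller.

byte[] key = DFSUtil.string2Bytes("key1");
byte[] value = DFSUtil.string2Bytes("value1");

table.put(key, value);
byte[] read = table.get(key);      // null if the key is absent
table.delete(key);
boolean empty = table.isEmpty();   // true once the column family has no entries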

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java
deleted file mode 100644
index 3942585..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import org.apache.hadoop.classification.InterfaceStability;
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.WriteBatch;
-
-import java.io.IOException;
-
-/**
- * Interface for a key-value store that holds Ozone metadata. Metadata is
- * stored as key-value pairs in which both key and value are arbitrary byte
- * arrays. Each Table stores one kind of key and value, which allows a single
- * DB to hold tables of different kinds.
- */
-@InterfaceStability.Evolving
-public interface Table extends AutoCloseable {
-
-  /**
-   * Puts a key-value pair into the store.
-   *
-   * @param key metadata key
-   * @param value metadata value
-   */
-  void put(byte[] key, byte[] value) throws IOException;
-
-  /**
-   * @return true if the metadata store is empty.
-   * @throws IOException on Failure
-   */
-  boolean isEmpty() throws IOException;
-
-  /**
-   * Returns the value mapped to the given key in byte array or returns null
-   * if the key is not found.
-   *
-   * @param key metadata key
-   * @return value in byte array or null if the key is not found.
-   * @throws IOException on Failure
-   */
-  byte[] get(byte[] key) throws IOException;
-
-  /**
-   * Deletes a key from the metadata store.
-   *
-   * @param key metadata key
-   * @throws IOException on Failure
-   */
-  void delete(byte[] key) throws IOException;
-
-  /**
-   * Returns the Column Family handle. TODO: This leaks a RocksDB abstraction
-   * into Ozone code; clean it up later.
-   *
-   * @return ColumnFamilyHandle
-   */
-  ColumnFamilyHandle getHandle();
-
-  /**
-   * A batch of PUT, DELETE operations handled as a single atomic write.
-   *
-   * @throws IOException write fails
-   */
-  void writeBatch(WriteBatch operation) throws IOException;
-
-  /**
-   * Returns the iterator for this metadata store.
-   *
-   * @return MetaStoreIterator
-   */
-  TableIterator<KeyValue> iterator();
-
-  /**
-   * Returns the Name of this Table.
-   * @return - Table Name.
-   * @throws IOException on failure.
-   */
-  String getName() throws IOException;
-
-  /**
-   * Class used to represent the key and value pair of a db entry.
-   */
-  class KeyValue {
-
-    private final byte[] key;
-    private final byte[] value;
-
-    /**
-     * KeyValue Constructor, used to represent a key and value of a db entry.
-     *
-     * @param key - Key Bytes
-     * @param value - Value bytes
-     */
-    private KeyValue(byte[] key, byte[] value) {
-      this.key = key;
-      this.value = value;
-    }
-
-    /**
-     * Create a KeyValue pair.
-     *
-     * @param key - Key Bytes
-     * @param value - Value bytes
-     * @return KeyValue object.
-     */
-    public static KeyValue create(byte[] key, byte[] value) {
-      return new KeyValue(key, value);
-    }
-
-    /**
-     * Return key.
-     *
-     * @return byte[]
-     */
-    public byte[] getKey() {
-      byte[] result = new byte[key.length];
-      System.arraycopy(key, 0, result, 0, key.length);
-      return result;
-    }
-
-    /**
-     * Return value.
-     *
-     * @return byte[]
-     */
-    public byte[] getValue() {
-      byte[] result = new byte[value.length];
-      System.arraycopy(value, 0, result, 0, value.length);
-      return result;
-    }
-  }
-}
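
A minimal sketch of an atomic batch against the Table interface above, mirroring the pattern used by RDBStore.move(). The table, newKey, oldKey and value names are illustrative, and whether WriteBatch.put() itself declares RocksDBException depends on the RocksDB version in use.

try (WriteBatch batch = new WriteBatch()) {
  batch.put(table.getHandle(), newKey, value);
  batch.delete(table.getHandle(), oldKey);
  table.writeBatch(batch);   // both operations apply together or not at all
}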

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableConfig.java
deleted file mode 100644
index 897028a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableConfig.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyOptions;
-
-/**
- * Class that maintains Table Configuration.
- */
-public class TableConfig {
-  private final String name;
-  private final ColumnFamilyOptions columnFamilyOptions;
-
-
-  /**
-   * Constructs a Table Config.
-   * @param name - Name of the Table.
-   * @param columnFamilyOptions - Column Family options.
-   */
-  public TableConfig(String name, ColumnFamilyOptions columnFamilyOptions) {
-    this.name = name;
-    this.columnFamilyOptions = columnFamilyOptions;
-  }
-
-  /**
-   * Returns the Name for this Table.
-   * @return - Name String
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns a ColumnFamilyDescriptor for this table.
-   * @return ColumnFamilyDescriptor
-   */
-  public ColumnFamilyDescriptor getDescriptor() {
-    return  new ColumnFamilyDescriptor(DFSUtil.string2Bytes(name),
-        columnFamilyOptions);
-  }
-
-  /**
-   * Returns Column family options for this Table.
-   * @return  ColumnFamilyOptions used for the Table.
-   */
-  public ColumnFamilyOptions getColumnFamilyOptions() {
-    return columnFamilyOptions;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    TableConfig that = (TableConfig) o;
-    return new EqualsBuilder()
-        .append(getName(), that.getName())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(17, 37)
-        .append(getName())
-        .toHashCode();
-  }
-}
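
A minimal sketch of describing a column family with the TableConfig class removed above; the table name and the compaction tuning call are illustrative choices, not defaults from the commit.

// The options object must remain open for as long as the DB uses it.
ColumnFamilyOptions options = new ColumnFamilyOptions()
    .optimizeUniversalStyleCompaction();
TableConfig usersTable = new TableConfig("users", options);
ColumnFamilyDescriptor descriptor = usersTable.getDescriptor();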

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableIterator.java
deleted file mode 100644
index 83a8f3c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TableIterator.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import java.io.Closeable;
-import java.util.Iterator;
-
-/**
- * Iterator for MetaDataStore DB.
- *
- * @param <T>
- */
-public interface TableIterator<T> extends Iterator<T>, Closeable {
-
-  /**
-   * seek to first entry.
-   */
-  void seekToFirst();
-
-  /**
-   * seek to last entry.
-   */
-  void seekToLast();
-
-  /**
-   * Seek to the specific key.
-   *
-   * @param key - Bytes that represent the key.
-   * @return T.
-   */
-  T seek(byte[] key);
-
-}
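
A minimal sketch of the seek() contract defined above, as implemented by RDBStoreIterator: the iterator is positioned at the first entry whose key is at or after the given key, and null is returned when no such entry exists. The iter variable and the prefix string are illustrative.

byte[] prefix = DFSUtil.string2Bytes("bucket1/");
Table.KeyValue first = iter.seek(prefix);
if (first != null) {
  // hasNext()/next() now continue from this position
}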

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/package-info.java
deleted file mode 100644
index 17d676d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Database interfaces for Ozone.
- */
-package org.apache.hadoop.utils.db;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java
deleted file mode 100644
index 4466337..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.utils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
deleted file mode 100644
index 04bfeb2..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.ratis;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.retry.RetryPolicies;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.ratis.shaded.proto.RaftProtos;
-import org.apache.ratis.util.Preconditions;
-import org.apache.ratis.util.SizeInBytes;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
-
-/**
- * Ratis helper methods.
- */
-public interface RatisHelper {
-  Logger LOG = LoggerFactory.getLogger(RatisHelper.class);
-
-  static String toRaftPeerIdString(DatanodeDetails id) {
-    return id.getUuidString();
-  }
-
-  static UUID toDatanodeId(String peerIdString) {
-    return UUID.fromString(peerIdString);
-  }
-
-  static UUID toDatanodeId(RaftPeerId peerId) {
-    return toDatanodeId(peerId.toString());
-  }
-
-  static UUID toDatanodeId(RaftProtos.RaftPeerProto peerId) {
-    return toDatanodeId(RaftPeerId.valueOf(peerId.getId()));
-  }
-
-  static String toRaftPeerAddressString(DatanodeDetails id) {
-    return id.getIpAddress() + ":" +
-        id.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
-  }
-
-  static RaftPeerId toRaftPeerId(DatanodeDetails id) {
-    return RaftPeerId.valueOf(toRaftPeerIdString(id));
-  }
-
-  static RaftPeer toRaftPeer(DatanodeDetails id) {
-    return new RaftPeer(toRaftPeerId(id), toRaftPeerAddressString(id));
-  }
-
-  static List<RaftPeer> toRaftPeers(Pipeline pipeline) {
-    return toRaftPeers(pipeline.getMachines());
-  }
-
-  static <E extends DatanodeDetails> List<RaftPeer> toRaftPeers(
-      List<E> datanodes) {
-    return datanodes.stream().map(RatisHelper::toRaftPeer)
-        .collect(Collectors.toList());
-  }
-
-  /* TODO: use a dummy id for all groups for the moment.
-   *       It should be changed to a unique id for each group.
-   */
-  RaftGroupId DUMMY_GROUP_ID =
-      RaftGroupId.valueOf(ByteString.copyFromUtf8("AOzoneRatisGroup"));
-
-  RaftGroup EMPTY_GROUP = new RaftGroup(DUMMY_GROUP_ID,
-      Collections.emptyList());
-
-  static RaftGroup emptyRaftGroup() {
-    return EMPTY_GROUP;
-  }
-
-  static RaftGroup newRaftGroup(Collection<RaftPeer> peers) {
-    return peers.isEmpty()? emptyRaftGroup()
-        : new RaftGroup(DUMMY_GROUP_ID, peers);
-  }
-
-  static RaftGroup newRaftGroup(RaftGroupId groupId,
-      Collection<DatanodeDetails> peers) {
-    final List<RaftPeer> newPeers = peers.stream()
-        .map(RatisHelper::toRaftPeer)
-        .collect(Collectors.toList());
-    return peers.isEmpty() ? new RaftGroup(groupId, Collections.emptyList())
-        : new RaftGroup(groupId, newPeers);
-  }
-
-  static RaftGroup newRaftGroup(Pipeline pipeline) {
-    return new RaftGroup(pipeline.getId().getRaftGroupID(),
-        toRaftPeers(pipeline));
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline,
-      RetryPolicy retryPolicy) {
-    return newRaftClient(rpcType, toRaftPeerId(pipeline.getLeader()),
-        newRaftGroup(pipeline.getId().getRaftGroupID(), pipeline.getMachines()),
-        retryPolicy);
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
-      RetryPolicy retryPolicy) {
-    return newRaftClient(rpcType, leader.getId(),
-        newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy);
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
-      RaftGroup group, RetryPolicy retryPolicy) {
-    return newRaftClient(rpcType, leader.getId(), group, retryPolicy);
-  }
-
-  static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader,
-      RaftGroup group, RetryPolicy retryPolicy) {
-    LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, leader, group);
-    final RaftProperties properties = new RaftProperties();
-    RaftConfigKeys.Rpc.setType(properties, rpcType);
-
-    GrpcConfigKeys.setMessageSizeMax(properties,
-        SizeInBytes.valueOf(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE));
-
-    return RaftClient.newBuilder()
-        .setRaftGroup(group)
-        .setLeaderId(leader)
-        .setProperties(properties)
-        .setRetryPolicy(retryPolicy)
-        .build();
-  }
-
-  static RetryPolicy createRetryPolicy(Configuration conf) {
-    int maxRetryCount =
-        conf.getInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY,
-            OzoneConfigKeys.
-                DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT);
-    long retryInterval = conf.getTimeDuration(OzoneConfigKeys.
-        DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, OzoneConfigKeys.
-        DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT
-        .toInt(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-    long leaderElectionTimeout = conf.getTimeDuration(
-        DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
-        DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
-            .toInt(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-    long clientRequestTimeout = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .toInt(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-    long retryCacheTimeout = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
-            .toInt(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
-    Preconditions
-        .assertTrue(maxRetryCount * retryInterval > 5 * leaderElectionTimeout,
-            "Please make sure dfs.ratis.client.request.max.retries * "
-                + "dfs.ratis.client.request.retry.interval > "
-                + "5 * dfs.ratis.leader.election.minimum.timeout.duration");
-    Preconditions.assertTrue(
-        maxRetryCount * (retryInterval + clientRequestTimeout)
-            < retryCacheTimeout,
-        "Please make sure "
-            + "(dfs.ratis.client.request.max.retries * "
-            + "(dfs.ratis.client.request.retry.interval + "
-            + "dfs.ratis.client.request.timeout.duration)) "
-            + "< dfs.ratis.server.retry-cache.timeout.duration");
-    TimeDuration sleepDuration =
-        TimeDuration.valueOf(retryInterval, TimeUnit.MILLISECONDS);
-    RetryPolicy retryPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration);
-    return retryPolicy;
-  }
-}
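
A minimal sketch of wiring the removed helpers together; conf (a Hadoop Configuration with the Ozone keys set) and pipeline (obtained from SCM) are assumed to exist, and SupportedRpcType.GRPC stands in for whatever RPC type the caller actually configures.

RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(conf);
RaftClient client = RatisHelper.newRaftClient(
    SupportedRpcType.GRPC, pipeline, retryPolicy);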

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java
deleted file mode 100644
index c13c20c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ratis;
-
-/**
- * This package contains classes related to Apache Ratis.
- */

