trafodion-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From se...@apache.org
Subject [1/2] incubator-trafodion git commit: [TRAFODION-2129] Trafodion to avoid use of deprecated HBase APIs/Classes
Date Tue, 02 Aug 2016 18:24:05 GMT
Repository: incubator-trafodion
Updated Branches:
  refs/heads/master 2d7a91f78 -> 7fe96649f


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TmAuditTlog.java
----------------------------------------------------------------------
diff --git a/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TmAuditTlog.java b/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TmAuditTlog.java
index bd423ea..e097122 100644
--- a/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TmAuditTlog.java
+++ b/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TmAuditTlog.java
@@ -35,12 +35,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Delete;
@@ -113,14 +112,13 @@ import java.util.concurrent.RejectedExecutionException;
 public class TmAuditTlog {
 
    static final Log LOG = LogFactory.getLog(TmAuditTlog.class);
-   private static HBaseAdmin admin;
    private Configuration config;
    private static String TLOG_TABLE_NAME;
    private static final byte[] TLOG_FAMILY = Bytes.toBytes("tf");
    private static final byte[] ASN_STATE = Bytes.toBytes("as");
    private static final byte[] QUAL_TX_STATE = Bytes.toBytes("tx");
    private static HTable[] table;
-   private static HConnection connection;
+   private static Connection connection;
    private static HBaseAuditControlPoint tLogControlPoint;
    private static long tLogControlPointNum;
    private static long tLogHashKey;
@@ -188,7 +186,7 @@ public class TmAuditTlog {
       byte[] endKey_orig;
       byte[] endKey;
 
-     TlogCallable(TransactionState txState, HRegionLocation location, HConnection connection) throws IOException {
+     TlogCallable(TransactionState txState, HRegionLocation location, Connection connection) throws IOException {
         transactionState = txState;
         this.location = location;
         table = new HTable(location.getRegionInfo().getTable(), connection, tlogThreadPool);
@@ -355,9 +353,10 @@ public class TmAuditTlog {
       }
    }
 
-   public TmAuditTlog (Configuration config) throws IOException, RuntimeException {
+   public TmAuditTlog (Configuration config, Connection connection) throws IOException, RuntimeException {
 
       this.config = config;
+      this.connection = connection;
       this.dtmid = Integer.parseInt(config.get("dtmid"));
       if (LOG.isTraceEnabled()) LOG.trace("Enter TmAuditTlog constructor for dtmid " + dtmid);
       TLOG_TABLE_NAME = config.get("TLOG_TABLE_NAME");
@@ -442,8 +441,6 @@ public class TmAuditTlog {
          LOG.error("TM_TLOG_RETRY_COUNT is not valid in ms.env");
       }
 
-      connection = HConnectionManager.createConnection(config);
-
       tlogNumLogs = 1;
       try {
          String numLogs = System.getenv("TM_TLOG_NUM_LOGS");
@@ -504,7 +501,6 @@ public class TmAuditTlog {
          hcol.setBlockCacheEnabled(false);
       }
       hcol.setMaxVersions(versions);
-      admin = new HBaseAdmin(config);
 
       filler = new byte[fillerSize];
       Arrays.fill(filler, (byte) ' ');
@@ -538,39 +534,30 @@ public class TmAuditTlog {
       long lvAsn = 0;
 
       if (LOG.isTraceEnabled()) LOG.trace("try new HBaseAuditControlPoint");
-      tLogControlPoint = new HBaseAuditControlPoint(config);
+      tLogControlPoint = new HBaseAuditControlPoint(config, connection);
 
       tlogAuditLock =    new Object[tlogNumLogs];
       table = new HTable[tlogNumLogs];
 
-//      try {
          // Get the asn from the last control point.  This ignores 
          // any asn increments between the last control point
          // write and a system crash and could result in asn numbers
          // being reused.  However this would just mean that some old 
          // records are held onto a bit longer before cleanup and is safe.
          asn.set(tLogControlPoint.getStartingAuditSeqNum());
-/*
-      // TODO: revisit
-      }
-      catch (Exception e2){
-         if (LOG.isDebugEnabled()) LOG.debug("Exception setting the ASN " + e2);
-         if (LOG.isDebugEnabled()) LOG.debug("Setting the ASN to 1");
-         asn.set(1L);  // Couldn't read the asn so start asn at 1
-      }
-*/
 
+      Admin admin  = connection.getAdmin();
       for (int i = 0 ; i < tlogNumLogs; i++) {
          tlogAuditLock[i]      = new Object();
          String lv_tLogName = new String(TLOG_TABLE_NAME + "_LOG_" + Integer.toHexString(i));
-         boolean lvTlogExists = admin.tableExists(lv_tLogName);
+         boolean lvTlogExists = admin.tableExists(TableName.valueOf(lv_tLogName));
          if (LOG.isTraceEnabled()) LOG.trace("Tlog table " + lv_tLogName + (lvTlogExists? " exists" : " does not exist" ));
          HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(lv_tLogName));
          desc.addFamily(hcol);
 
          if (lvTlogExists == false) {
             // Need to prime the asn for future writes
-            try {
+          try {
               if (LOG.isTraceEnabled()) LOG.trace("Creating the table " + lv_tLogName);
                admin.createTable(desc);
                asn.set(1L);  // TLOG didn't exist previously, so start asn at 1
@@ -586,6 +573,7 @@ public class TmAuditTlog {
          table[i].setAutoFlushTo(this.useAutoFlush);
 
       }
+      admin.close();
 
       lvAsn = asn.get();
       // This control point write needs to be delayed until after recovery completes, 
@@ -671,18 +659,16 @@ public class TmAuditTlog {
          // We need to send this to a remote Tlog, not our local one, so open the appropriate table
          if (LOG.isTraceEnabled()) LOG.trace("putSingleRecord writing to remote Tlog for transid: " + lvTransid + " state: " + lvTxState + " ASN: " + lvAsn
                   + " in thread " + threadId);
-         HTableInterface recoveryTable;
+         Table recoveryTable;
          int lv_ownerNid = (int)(lvTransid >> 32);
          String lv_tLogName = new String("TRAFODION._DTM_.TLOG" + String.valueOf(lv_ownerNid) + "_LOG_" + Integer.toHexString(lv_lockIndex));
-         HConnection recoveryTableConnection = HConnectionManager.createConnection(this.config);
-         recoveryTable = recoveryTableConnection.getTable(TableName.valueOf(lv_tLogName));
+         recoveryTable = connection.getTable(TableName.valueOf(lv_tLogName));
 
          try {
             recoveryTable.put(p);
          }
          finally {
             recoveryTable.close();
-            recoveryTableConnection.close();
          }
       }
       else {
@@ -827,10 +813,9 @@ public class TmAuditTlog {
 
    public boolean deleteAgedEntries(final long lvAsn) throws IOException {
       if (LOG.isTraceEnabled()) LOG.trace("deleteAgedEntries start:  Entries older than " + lvAsn + " will be removed");
-      HTableInterface deleteTable;
+      Table deleteTable;
       for (int i = 0; i < tlogNumLogs; i++) {
          String lv_tLogName = new String(TLOG_TABLE_NAME + "_LOG_" + Integer.toHexString(i));
-//         Connection deleteConnection = ConnectionFactory.createConnection(this.config);
 
          if (LOG.isTraceEnabled()) LOG.trace("delete table is: " + lv_tLogName);
 
@@ -1025,7 +1010,7 @@ public class TmAuditTlog {
 
       // This request might be for a transaction not originating on this node, so we need to open
       // the appropriate Tlog
-      HTableInterface unknownTransactionTable;
+      Table unknownTransactionTable;
       long lvTransid = ts.getTransactionId();
       int lv_ownerNid = (int)(lvTransid >> 32);
       int lv_lockIndex = (int)(lvTransid & tLogHashKey);
@@ -1206,13 +1191,12 @@ public class TmAuditTlog {
 
          if (LOG.isTraceEnabled()) LOG.trace("deleteEntriesOlderThanASN: "
               + pv_ASN + ", in thread: " + threadId);
-         HTableInterface targetTable;
          List<HRegionLocation> regionList;
 
          // For every Tlog table for this node
          for (int index = 0; index < tlogNumLogs; index++) {
             String lv_tLogName = new String("TRAFODION._DTM_.TLOG" + String.valueOf(this.dtmid) + "_LOG_" + Integer.toHexString(index));
-            regionList = connection.locateRegions(TableName.valueOf(lv_tLogName), false, false);
+            regionList = connection.getRegionLocator(TableName.valueOf(lv_tLogName)).getAllRegionLocations();
             loopIndex++;
             int regionIndex = 0;
             // For every region in this table

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TrafInfo.java
----------------------------------------------------------------------
diff --git a/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TrafInfo.java b/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TrafInfo.java
index 1cd335b..777eaea 100644
--- a/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TrafInfo.java
+++ b/core/sqf/src/seatrans/tm/hbasetmlib2/src/main/java/org/trafodion/dtm/TrafInfo.java
@@ -32,9 +32,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 //H98import org.apache.hadoop.hbase.ipc.HMasterInterface;
 //H98import org.apache.hadoop.hbase.ipc.TransactionalRegionInterface;
 //H98import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -42,8 +41,7 @@ import org.apache.hadoop.hbase.client.HConnectionManager;
 
 public class TrafInfo {
 
-    private HBaseAdmin hbadmin;
-    private HConnection connection;
+    private Connection connection;
     Configuration     config;
     //    HMasterInterface  hmaster;
 
@@ -53,8 +51,7 @@ public class TrafInfo {
 
     public void init() throws IOException {
         this.config = HBaseConfiguration.create();
-        this.connection = HConnectionManager.createConnection(config);
-        hbadmin = new HBaseAdmin(config);
+        this.connection = ConnectionFactory.createConnection(config);
     }
 
     public static void printHelp() {
@@ -141,7 +138,7 @@ public class TrafInfo {
                     }
                 }
 
-            } catch(Exception e) {
+            } catch(IOException e) {
                 System.out.println("ERROR: Unable to get region info, Exiting");
                 e.printStackTrace();
                 System.exit(1);

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/executor/ExExeUtilLoad.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExExeUtilLoad.cpp b/core/sql/executor/ExExeUtilLoad.cpp
index f1bb391..b0e8caf 100644
--- a/core/sql/executor/ExExeUtilLoad.cpp
+++ b/core/sql/executor/ExExeUtilLoad.cpp
@@ -2350,18 +2350,6 @@ short ExExeUtilHBaseBulkUnLoadTcb::work()
     case UNLOAD_END_:
     case UNLOAD_END_ERROR_:
     {
-      if (step_ == UNLOAD_END_ &&
-          (hblTdb().getScanType() == ComTdbExeUtilHBaseBulkUnLoad::SNAPSHOT_SCAN_CREATE ||
-           hblTdb().getScanType() == ComTdbExeUtilHBaseBulkUnLoad::SNAPSHOT_SCAN_EXISTING))
-      {
-        sfwRetCode = sequenceFileWriter_->release( );
-        if (sfwRetCode != SFW_OK)
-        {
-          createHdfsFileError(sfwRetCode);
-          step_ = UNLOAD_END_ERROR_;
-          break;
-        }
-      }
       if (restoreCQD("TRAF_TABLE_SNAPSHOT_SCAN") < 0)
       {
         step_ = UNLOAD_ERROR_;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/executor/HBaseClient_JNI.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/HBaseClient_JNI.cpp b/core/sql/executor/HBaseClient_JNI.cpp
index c4ec9a5..9f3bd70 100644
--- a/core/sql/executor/HBaseClient_JNI.cpp
+++ b/core/sql/executor/HBaseClient_JNI.cpp
@@ -208,8 +208,6 @@ HBC_RetCode HBaseClient_JNI::init()
     
     JavaMethods_[JM_CTOR       ].jm_name      = "<init>";
     JavaMethods_[JM_CTOR       ].jm_signature = "()V";
-    JavaMethods_[JM_GET_ERROR  ].jm_name      = "getLastError";
-    JavaMethods_[JM_GET_ERROR  ].jm_signature = "()Ljava/lang/String;";
     JavaMethods_[JM_INIT       ].jm_name      = "init";
     JavaMethods_[JM_INIT       ].jm_signature = "(Ljava/lang/String;Ljava/lang/String;)Z";
     JavaMethods_[JM_CLEANUP    ].jm_name      = "cleanup";
@@ -296,14 +294,6 @@ HBC_RetCode HBaseClient_JNI::init()
 //////////////////////////////////////////////////////////////////////////////
 // 
 //////////////////////////////////////////////////////////////////////////////
-NAString HBaseClient_JNI::getLastJavaError()
-{
-  return JavaObjectInterface::getLastJavaError(JavaMethods_[JM_GET_ERROR].methodID);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// 
-//////////////////////////////////////////////////////////////////////////////
 HBC_RetCode HBaseClient_JNI::initConnection(const char* zkServers, const char* zkPort)
 {
   QRLogger::log(CAT_SQL_HBASE, LL_DEBUG, "HBaseClient_JNI::initConnection(%s, %s) called.", zkServers, zkPort);
@@ -331,9 +321,6 @@ HBC_RetCode HBaseClient_JNI::initConnection(const char* zkServers, const char* z
   // boolean init(java.lang.String, java.lang.String); 
   jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_INIT].methodID, js_zkServers, js_zkPort);
 
-  jenv_->DeleteLocalRef(js_zkServers);  
-  jenv_->DeleteLocalRef(js_zkPort);  
-
   if (jenv_->ExceptionCheck())
   {
     getExceptionDetails();
@@ -1045,13 +1032,6 @@ HBC_RetCode HBaseClient_JNI::registerTruncateOnAbort(const char* fileName, Int64
     jenv_->PopLocalFrame(NULL);
     return HBC_ERROR_DROP_EXCEPTION;
   }
-
-  if (jresult == false)
-  {
-    logError(CAT_SQL_HBASE, "HBaseClient_JNI::drop()", getLastJavaError());
-    jenv_->PopLocalFrame(NULL);
-    return HBC_ERROR_DROP_EXCEPTION;
-  }
   jenv_->PopLocalFrame(NULL);
   return HBC_OK;
 }
@@ -1090,12 +1070,6 @@ HBC_RetCode HBaseClient_JNI::drop(const char* fileName, JNIEnv* jenv, Int64 tran
     return HBC_ERROR_DROP_EXCEPTION;
   }
 
-  if (jresult == false) 
-  {
-    logError(CAT_SQL_HBASE, "HBaseClient_JNI::drop()", getLastJavaError());
-    jenv_->PopLocalFrame(NULL);
-    return HBC_ERROR_DROP_EXCEPTION;
-  }
   jenv_->PopLocalFrame(NULL);
   return HBC_OK;
 }
@@ -1144,12 +1118,6 @@ HBC_RetCode HBaseClient_JNI::dropAll(const char* pattern, bool async, Int64 tran
     return HBC_ERROR_DROP_EXCEPTION;
   }
 
-  if (jresult == false) 
-  {
-    logError(CAT_SQL_HBASE, "HBaseClient_JNI::dropAll()", getLastJavaError());
-    jenv_->PopLocalFrame(NULL);
-    return HBC_ERROR_DROP_EXCEPTION;
-  }
   jenv_->PopLocalFrame(NULL);
   return HBC_OK;
 }
@@ -1708,8 +1676,6 @@ HBLC_RetCode HBulkLoadClient_JNI::init()
 
     JavaMethods_[JM_CTOR       ].jm_name      = "<init>";
     JavaMethods_[JM_CTOR       ].jm_signature = "()V";
-    JavaMethods_[JM_GET_ERROR  ].jm_name      = "getLastError";
-    JavaMethods_[JM_GET_ERROR  ].jm_signature = "()Ljava/lang/String;";
     JavaMethods_[JM_INIT_HFILE_PARAMS     ].jm_name      = "initHFileParams";
     JavaMethods_[JM_INIT_HFILE_PARAMS     ].jm_signature = "(Ljava/lang/String;Ljava/lang/String;JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;)Z";
     JavaMethods_[JM_CLOSE_HFILE      ].jm_name      = "closeHFile";
@@ -2265,13 +2231,6 @@ HBulkLoadClient_JNI::~HBulkLoadClient_JNI()
   //QRLogger::log(CAT_JNI_TOP, LL_DEBUG, "HBulkLoadClient_JNI destructor called.");
 }
 
-NAString HBulkLoadClient_JNI::getLastJavaError()
-{
-  return JavaObjectInterface::getLastJavaError(JavaMethods_[JM_GET_ERROR].methodID);
-}
-
-
-
 
 ////////////////////////////////////////////////////////////////////
 HBC_RetCode HBaseClient_JNI::revoke(const Text& user, const Text& tblName, const TextVec& actions)
@@ -3406,9 +3365,6 @@ HTC_RetCode HTableClient_JNI::init()
       return (HTC_RetCode)JavaObjectInterface::init(className, javaClass_, JavaMethods_, (Int32)JM_LAST, javaMethodsInitialized_);
     }
     JavaMethods_ = new JavaMethodInit[JM_LAST];
-    
-    JavaMethods_[JM_CTOR       ].jm_name      = "<init>";
-    JavaMethods_[JM_CTOR       ].jm_signature = "()V";
     JavaMethods_[JM_GET_ERROR  ].jm_name      = "getLastError";
     JavaMethods_[JM_GET_ERROR  ].jm_signature = "()Ljava/lang/String;";
     JavaMethods_[JM_SCAN_OPEN  ].jm_name      = "startScan";

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/executor/HBaseClient_JNI.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/HBaseClient_JNI.h b/core/sql/executor/HBaseClient_JNI.h
index 83f3447..05ab77a 100644
--- a/core/sql/executor/HBaseClient_JNI.h
+++ b/core/sql/executor/HBaseClient_JNI.h
@@ -273,8 +273,7 @@ private:
   NAString getLastJavaError();
 
   enum JAVA_METHODS {
-    JM_CTOR = 0
-   ,JM_GET_ERROR 
+    JM_GET_ERROR
    ,JM_SCAN_OPEN 
    ,JM_DELETE    
    ,JM_COPROC_AGGR
@@ -514,7 +513,6 @@ private:
 private:  
   enum JAVA_METHODS {
     JM_CTOR = 0
-   ,JM_GET_ERROR 
    ,JM_INIT
    ,JM_CLEANUP   
    ,JM_GET_HTC
@@ -736,7 +734,6 @@ private:
 
   enum JAVA_METHODS {
     JM_CTOR = 0
-   ,JM_GET_ERROR
    ,JM_INIT_HFILE_PARAMS
    ,JM_CLOSE_HFILE
    ,JM_DO_BULK_LOAD

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/executor/SequenceFileReader.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileReader.cpp b/core/sql/executor/SequenceFileReader.cpp
index 7a4a5f6..af31931 100644
--- a/core/sql/executor/SequenceFileReader.cpp
+++ b/core/sql/executor/SequenceFileReader.cpp
@@ -530,8 +530,6 @@ SFW_RetCode SequenceFileWriter::init()
     JavaMethods_[JM_CREATE_SNAPSHOT].jm_signature = "(Ljava/lang/String;Ljava/lang/String;)Z";
     JavaMethods_[JM_DELETE_SNAPSHOT].jm_name      = "deleteSnapshot";
     JavaMethods_[JM_DELETE_SNAPSHOT].jm_signature = "(Ljava/lang/String;)Z";
-    JavaMethods_[JM_RELEASE].jm_name      = "release";
-    JavaMethods_[JM_RELEASE].jm_signature = "()Z";
     JavaMethods_[JM_VERIFY_SNAPSHOT].jm_name      = "verifySnapshot";
     JavaMethods_[JM_VERIFY_SNAPSHOT].jm_signature = "(Ljava/lang/String;Ljava/lang/String;)Z";
     JavaMethods_[JM_HDFS_DELETE_PATH].jm_name      = "hdfsDeletePath";
@@ -925,30 +923,6 @@ SFW_RetCode SequenceFileWriter::deleteSnapshot( const NAString&  snapshotName)
 }
 
 
-SFW_RetCode SequenceFileWriter::release( )
-{
-  QRLogger::log(CAT_SQL_HBASE, LL_DEBUG, "SequenceFileWriter::release() called.");
-
-  tsRecentJMFromJNI = JavaMethods_[JM_RELEASE].jm_full_name;
-  jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_RELEASE].methodID);
-
-  if (jenv_->ExceptionCheck())
-  {
-    getExceptionDetails();
-    logError(CAT_SQL_HBASE, __FILE__, __LINE__);
-    logError(CAT_SQL_HBASE, "SequenceFileWriter::release()", getLastError());
-    return SFW_ERROR_RELEASE_EXCEPTION;
-  }
-
-  if (jresult == false)
-  {
-    logError(CAT_SQL_HBASE, "SequenceFileWriter::release()", getLastError());
-    return SFW_ERROR_RELEASE_EXCEPTION;
-  }
-
-  return SFW_OK;
-}
-
 SFW_RetCode SequenceFileWriter::hdfsMergeFiles( const NAString& srcPath,
                                                 const NAString& dstPath)
 {

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/executor/SequenceFileReader.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileReader.h b/core/sql/executor/SequenceFileReader.h
index 0ca11bd..9180de8 100644
--- a/core/sql/executor/SequenceFileReader.h
+++ b/core/sql/executor/SequenceFileReader.h
@@ -160,7 +160,6 @@ typedef enum {
  ,SFW_ERROR_CREATE_SNAPSHOT_EXCEPTION
  ,SFW_ERROR_DELETE_SNAPSHOT_PARAM
  ,SFW_ERROR_DELETE_SNAPSHOT_EXCEPTION
- ,SFW_ERROR_RELEASE_EXCEPTION
  ,SFW_ERROR_VERIFY_SNAPSHOT_PARAM
  ,SFW_ERROR_VERIFY_SNAPSHOT_EXCEPTION
  ,SFW_ERROR_HDFS_DELETE_PATH_PARAM
@@ -240,7 +239,6 @@ private:
     JM_HDFS_EXISTS,
     JM_CREATE_SNAPSHOT,
     JM_DELETE_SNAPSHOT,
-    JM_RELEASE,
     JM_VERIFY_SNAPSHOT,
     JM_HDFS_DELETE_PATH,
     JM_LAST

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/regress/privs1/DIFF133.KNOWN
----------------------------------------------------------------------
diff --git a/core/sql/regress/privs1/DIFF133.KNOWN b/core/sql/regress/privs1/DIFF133.KNOWN
index 3ad9979..444108c 100644
--- a/core/sql/regress/privs1/DIFF133.KNOWN
+++ b/core/sql/regress/privs1/DIFF133.KNOWN
@@ -1,25 +1,16 @@
-288,290d287
-< 
-< 
-< 
-326,343c323
-< OBJECT_NAME TYPE GRANTEE PRIVILEGES_BITMAP GRANTABLE_BITMAP
-< ----------
-< 
-< TRAFODION."PRIVMGR_MD"."COMPONENTS" BT DB__ROOT 47 47
-< TRAFODION."PRIVMGR_MD"."COMPONENT_OPERAT BT DB__ROOT 47 47
-< TRAFODION."PRIVMGR_MD"."COMPONENT_PRIVIL BT DB__ROOT 47 47
-< TRAFODION."PRIVMGR_MD"."OBJECT_PRIVILEGE BT DB__ROOT 47 47
-< TRAFODION."PRIVMGR_MD"."ROLE_USAGE" BT DB__ROOT 47 47
-< TRAFODION."T133SCH"."GAMES" BT DB__ROOT 47 47
-< TRAFODION."T133SCH"."GAMES_BY_PLAYER" VI DB__ROOT 1 0
-< TRAFODION."T133SCH"."HOME_TEAMS_GAMES" VI DB__ROOT 1 0
-< TRAFODION."T133SCH"."PLAYERS" BT DB__ROOT 47 47
-< TRAFODION."T133SCH"."PLAYERS_ON_TEAM" VI DB__ROOT 1 0
-< TRAFODION."T133SCH"."STANDINGS" BT DB__ROOT 47 47
-< TRAFODION."T133SCH"."TEAMS" BT DB__ROOT 47 47
-< TRAFODION."T133SCH"."TEAM_STATISTICS" BT DB__ROOT 47 47
-< 
-< --- 13 row(s) selected.
+350c350,352
+< --- SQL operation complete.
 ---
-> --- 0 row(s) selected.
+> *** ERROR[8606] Transaction subsystem DTM returned error 97 on a commit transaction.
+> 
+> --- SQL operation failed with errors.
+361d362
+< ROLE_USAGE
+392d392
+< TRAFODION._PRIVMGR_MD_.ROLE_USAGE BT -2 DB__ROOT SIDU-R-
+394c394
+< --- 23 row(s) selected.
+---
+> --- 22 row(s) selected.
+409d408
+< ROLE_USAGE

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java b/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java
index 18fa596..ea2c7d8 100644
--- a/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java
+++ b/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java
@@ -38,6 +38,9 @@ import org.apache.log4j.PropertyConfigurator;
 import org.apache.log4j.Logger;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
@@ -45,7 +48,6 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.transactional.RMInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -99,9 +101,8 @@ import com.google.protobuf.ServiceException;
 public class HBaseClient {
 
     static Logger logger = Logger.getLogger(HBaseClient.class.getName());
-    public static Configuration config = HBaseConfiguration.create();
-    String lastError;
-    RMInterface table = null;
+    private static Configuration config = HBaseConfiguration.create();
+    private RMInterface table = null;
 
     private PoolMap<String, HTableClient> hTableClientsFree;
     private PoolMap<String, HTableClient> hTableClientsInUse;
@@ -131,7 +132,7 @@ public class HBaseClient {
     public static final int HBASE_MEMSTORE_FLUSH_SIZE = 21;
     public static final int HBASE_SPLIT_POLICY = 22;
 
-    
+    private static Connection connection; 
     public HBaseClient() {
       if (hTableClientsFree == null)
          hTableClientsFree = new PoolMap<String, HTableClient>
@@ -140,14 +141,6 @@ public class HBaseClient {
                (PoolType.Reusable, Integer.MAX_VALUE);
     }
 
-    public String getLastError() {
-        return lastError;
-    }
-
-    void setLastError(String err) {
-        lastError = err;
-    }
-
     static {
     	//Some clients of this class e.g., DcsServer/JdbcT2 
     	//want to use use their own log4j.properties file instead
@@ -163,15 +156,21 @@ public class HBaseClient {
     	PropertyConfigurator.configure(confFile);
     }
 
-    public boolean init(String zkServers, String zkPort) 
+    
+    static public Connection getConnection() throws IOException {
+        if (connection == null) 
+              connection = ConnectionFactory.createConnection(config);
+        return connection;
+    }
+
+    public boolean init(String connectParam1, String connectParam2)
 	throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
     {
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.init(" + zkServers + ", " + zkPort
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.init(" + connectParam1 + ", " + connectParam2
                          + ") called.");
-        HBaseAdmin.checkHBaseAvailable(config);
-
-        table = new RMInterface();
-        
+        if (connection != null)
+           connection = ConnectionFactory.createConnection(config); 
+        table = new RMInterface(connection);
         return true;
     }
  
@@ -249,7 +248,7 @@ public class HBaseClient {
               metaColDesc.setMaxVersions(DtmConst.SSCC_MAX_DATA_VERSION);
             metaColDesc.setInMemory(true);
             desc.addFamily(metaColDesc);
-            HBaseAdmin admin = new HBaseAdmin(config);
+            Admin admin = getConnection().getAdmin();
             admin.createTable(desc);
             admin.close();
             return true;
@@ -515,7 +514,7 @@ public class HBaseClient {
               metaColDesc.setMaxVersions(DtmConst.SSCC_MAX_DATA_VERSION);
             metaColDesc.setInMemory(true);
             desc.addFamily(metaColDesc);
-            HBaseAdmin admin = new HBaseAdmin(config);
+            Admin admin = getConnection().getAdmin();
                if (beginEndKeys != null && beginEndKeys.length > 0)
                {
                   byte[][] keys = new byte[beginEndKeys.length][];
@@ -550,7 +549,7 @@ public class HBaseClient {
         return true;
     }
 
-    private void waitForCompletion(String tblName,HBaseAdmin admin) 
+    private void waitForCompletion(String tblName,Admin admin) 
         throws IOException {
         // poll for completion of an asynchronous operation
         boolean keepPolling = true;
@@ -576,8 +575,8 @@ public class HBaseClient {
 
         if (logger.isDebugEnabled()) logger.debug("HBaseClient.alter(" + tblName + ") called.");
         cleanupCache(tblName);
-        HBaseAdmin admin = new HBaseAdmin(config);
-        HTableDescriptor htblDesc = admin.getTableDescriptor(tblName.getBytes());       
+        Admin admin = getConnection().getAdmin();
+        HTableDescriptor htblDesc = admin.getTableDescriptor(TableName.valueOf(tblName));       
         HColumnDescriptor[] families = htblDesc.getColumnFamilies();
 
         String colFam = (String)tableOptions[HBASE_NAME];
@@ -630,11 +629,11 @@ public class HBaseClient {
                 // so we have to have additional code to poll for their completion
                 // (I hear that synchronous versions will be available in HBase 1.x)
                 if (status.tableDescriptorChanged()) {
-                    admin.modifyTable(tblName,htblDesc);
+                    admin.modifyTable(TableName.valueOf(tblName),htblDesc);
                     waitForCompletion(tblName,admin);
                 }
                 else if (status.columnDescriptorChanged()) {
-                    admin.modifyColumn(tblName,colDesc);                  
+                    admin.modifyColumn(TableName.valueOf(tblName),colDesc);                  
                     waitForCompletion(tblName,admin);
                 }
                 admin.close();
@@ -646,15 +645,16 @@ public class HBaseClient {
     public boolean drop(String tblName, long transID)
              throws MasterNotRunningException, IOException {
         if (logger.isDebugEnabled()) logger.debug("HBaseClient.drop(" + tblName + ") called.");
-        HBaseAdmin admin = new HBaseAdmin(config);
+        Admin admin = getConnection().getAdmin();
         try {
            if(transID != 0) {
               table.dropTable(tblName, transID);
            }
            else {
-               if (admin.isTableEnabled(tblName))
-                   admin.disableTable(tblName);
-              admin.deleteTable(tblName);
+               TableName tableName = TableName.valueOf(tblName);
+               if (admin.isTableEnabled(tableName))
+                   admin.disableTable(tableName);
+              admin.deleteTable(tableName);
            }
            cleanupCache(tblName);
         } finally {
@@ -666,8 +666,7 @@ public class HBaseClient {
     public boolean dropAll(String pattern, long transID) 
              throws MasterNotRunningException, IOException {
             if (logger.isDebugEnabled()) logger.debug("HBaseClient.dropAll(" + pattern + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
-
+            Admin admin = getConnection().getAdmin();
 	    HTableDescriptor[] htdl = admin.listTables(pattern);
 	    if (htdl == null) // no tables match the given pattern.
 		return true;
@@ -682,15 +681,15 @@ public class HBaseClient {
                     continue;
                 try {
                     if(transID != 0) {
-                        //                        System.out.println("tblName " + tblName);
                         table.dropTable(tblName, transID);
                     }
                     else {
-                        if (! admin.isTableEnabled(tblName))
-                            admin.enableTable(tblName);
+                        TableName tableName = TableName.valueOf(tblName);
+                        if (! admin.isTableEnabled(tableName))
+                            admin.enableTable(tableName);
                         
-                        admin.disableTable(tblName);
-                        admin.deleteTable(tblName);
+                        admin.disableTable(tableName);
+                        admin.deleteTable(tableName);
                     }
                 }
                 
@@ -715,7 +714,7 @@ public class HBaseClient {
     public byte[][] listAll(String pattern) 
              throws MasterNotRunningException, IOException {
             if (logger.isDebugEnabled()) logger.debug("HBaseClient.listAll(" + pattern + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
+            Admin admin = getConnection().getAdmin();
 
 	    HTableDescriptor[] htdl = 
                 (pattern.isEmpty() ? admin.listTables() : admin.listTables(pattern));
@@ -739,7 +738,7 @@ public class HBaseClient {
              throws MasterNotRunningException, IOException {
             if (logger.isDebugEnabled()) logger.debug("HBaseClient.getRegionStats(" + tableName + ") called.");
 
-            HBaseAdmin admin = new HBaseAdmin(config);
+            Admin admin = getConnection().getAdmin();
             HTable htbl = new HTable(config, tableName);
             HRegionInfo hregInfo = null;
             byte[][] regionInfo = null;
@@ -781,7 +780,6 @@ public class HBaseClient {
                     regionInfo[i++] = oneRegion.getBytes();
 
                 }
-
             }
             finally {
                 admin.close();
@@ -793,7 +791,7 @@ public class HBaseClient {
     public boolean copy(String srcTblName, String tgtTblName, boolean force)
 	throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException {
             if (logger.isDebugEnabled()) logger.debug("HBaseClient.copy(" + srcTblName + tgtTblName + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
+            Admin admin = getConnection().getAdmin();
 	    
 	    String snapshotName = srcTblName + "_SNAPSHOT";
 	    
@@ -809,19 +807,21 @@ public class HBaseClient {
 			    }
 		    }
 		}
+            TableName tgtTableName = TableName.valueOf(tgtTblName);
+            TableName srcTableName = TableName.valueOf(srcTblName);
 
             if ((force == true) &&
-                (admin.tableExists(tgtTblName))) {
-                admin.disableTable(tgtTblName);
-                admin.deleteTable(tgtTblName);
+                (admin.tableExists(tgtTableName))) {
+                admin.disableTable(tgtTableName);
+                admin.deleteTable(tgtTableName);
             }
                 
-	    if (! admin.isTableDisabled(srcTblName))
-		admin.disableTable(srcTblName);
-	    admin.snapshot(snapshotName, srcTblName);
-	    admin.cloneSnapshot(snapshotName, tgtTblName);
+	    if (! admin.isTableDisabled(srcTableName))
+		admin.disableTable(srcTableName);
+	    admin.snapshot(snapshotName, srcTableName);
+	    admin.cloneSnapshot(snapshotName, tgtTableName);
 	    admin.deleteSnapshot(snapshotName);
-	    admin.enableTable(srcTblName);
+	    admin.enableTable(srcTableName);
             admin.close();
             return true;
     }
@@ -829,8 +829,8 @@ public class HBaseClient {
     public boolean exists(String tblName, long transID)  
            throws MasterNotRunningException, IOException {
             if (logger.isDebugEnabled()) logger.debug("HBaseClient.exists(" + tblName + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
-            boolean result = admin.tableExists(tblName);
+            Admin admin = getConnection().getAdmin();
+            boolean result = admin.tableExists(TableName.valueOf(tblName));
             admin.close();
             return result;
     }
@@ -842,13 +842,13 @@ public class HBaseClient {
                          + (useTRex ? ", use TRX" : ", no TRX") + ") called.");
        HTableClient htable = hTableClientsFree.get(tblName);
        if (htable == null) {
-          htable = new HTableClient();
+          htable = new HTableClient(getConnection());
           if (htable.init(tblName, useTRex) == false) {
              if (logger.isDebugEnabled()) logger.debug("  ==> Error in init(), returning empty.");
              return null;
           }
 
-          HBaseAdmin admin = new HBaseAdmin(config);
+          Admin admin = getConnection().getAdmin();
           HTableDescriptor tblDesc = admin.getTableDescriptor(TableName.valueOf(tblName));
           if (logger.isDebugEnabled()) logger.debug("check coprocessor num for tbl : "+ tblName+". coprocessor size : "+tblDesc.getCoprocessors().size());
           boolean added = CoprocessorUtils.addCoprocessor(config.get("hbase.coprocessor.region.classes"), tblDesc);
@@ -856,9 +856,10 @@ public class HBaseClient {
               if (logger.isDebugEnabled())
                   logger.debug("  ==> add coprocessor for table : " + tblName);
               synchronized (admin) {
-                  admin.disableTable(tblName);
-                  admin.modifyTable(tblName, tblDesc);
-                  admin.enableTable(tblName);
+                  TableName table = TableName.valueOf(tblName);
+                  admin.disableTable(table);
+                  admin.modifyTable(table, tblDesc);
+                  admin.enableTable(table);
               }
           }
 
@@ -980,7 +981,7 @@ public class HBaseClient {
       if (rowSize == 0)
         return 0;
 
-      HBaseAdmin admin = new HBaseAdmin(config);
+      Admin admin = getConnection().getAdmin();
       HTable htbl = new HTable(config, tblName);
       long totalMemStoreBytes = 0;
       try {
@@ -1497,7 +1498,7 @@ public class HBaseClient {
   //associated with it
   public String getLatestSnapshot(String tabName) throws IOException
   {
-    HBaseAdmin admin = new HBaseAdmin(config);
+    Admin admin = getConnection().getAdmin();
     List<SnapshotDescription> snapDescs = admin.listSnapshots();
     long maxTimeStamp = 0;
     String latestsnpName = null;
@@ -1512,7 +1513,6 @@ public class HBaseClient {
       
     }
     admin.close();
-    admin = null;
     return latestsnpName;
   }
   public boolean cleanSnpScanTmpLocation(String pathStr) throws IOException
@@ -1688,14 +1688,14 @@ public class HBaseClient {
   public boolean  createCounterTable(String tabName,  String famName) throws IOException, MasterNotRunningException
   {
     if (logger.isDebugEnabled()) logger.debug("HBaseClient.createCounterTable() - start");
-    HBaseAdmin admin = new HBaseAdmin(config);
-    TableName tn =  TableName.valueOf (tabName);
-    if (admin.tableExists(tabName)) {
+    Admin admin = getConnection().getAdmin();
+    TableName tableName =  TableName.valueOf (tabName);
+    if (admin.tableExists(tableName)) {
         admin.close();
         return true;
     }
 
-    HTableDescriptor desc = new HTableDescriptor(tn);
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     HColumnDescriptor colDesc = new HColumnDescriptor(famName);
     // A counter table is non-DTM-transactional.
     // Use the default maximum versions for MVCC.

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java b/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java
index 6f0d15c..4bd104f 100644
--- a/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java
+++ b/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
@@ -77,12 +77,12 @@ import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
 
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
 
 import org.apache.hive.jdbc.HiveDriver;
-import java.sql.Connection;
 import java.sql.Statement;
 import java.sql.DriverManager;
 import java.sql.SQLException;
@@ -96,6 +96,7 @@ public class HBulkLoadClient
   private final static String BULKLOAD_STAGING_DIR = "hbase.bulkload.staging.dir";
   private final static long MAX_HFILE_SIZE = 10737418240L; //10 GB
   
+  private Connection connection_; 
   public static int BLOCKSIZE = 64*1024;
   public static String COMPRESSION = Compression.Algorithm.NONE.getName();
   String lastError;
@@ -111,24 +112,19 @@ public class HBulkLoadClient
   DataBlockEncoding dataBlockEncoding = DataBlockEncoding.NONE;
   FSDataOutputStream fsOut = null;
 
-  public HBulkLoadClient()
+  public HBulkLoadClient() throws IOException
   {
     if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient() called.");
+    connection_ = HBaseClient.getConnection();
   }
 
   public HBulkLoadClient(Configuration conf) throws IOException
   {
     if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient(...) called.");
     config = conf;
+    connection_ = HBaseClient.getConnection();
   }
 
-  public String getLastError() {
-    return lastError;
-  }
-
-  void setLastError(String err) {
-      lastError = err;
-  }
   public boolean initHFileParams(String hFileLoc, String hFileNm, long userMaxSize /*in MBs*/, String tblName,
                                  String sampleTblName, String sampleTblDDL) 
   throws UnsupportedOperationException, IOException, SQLException, ClassNotFoundException
@@ -170,10 +166,9 @@ public class HBulkLoadClient
     if (sampleTblDDL.length() > 0)
     {
       Class.forName("org.apache.hive.jdbc.HiveDriver");
-      Connection conn = DriverManager.getConnection("jdbc:hive2://", "hive", "");
+      java.sql.Connection conn = DriverManager.getConnection("jdbc:hive2://", "hive", "");
       Statement stmt = conn.createStatement();
       stmt.execute("drop table if exists " + sampleTblName);
-      //System.out.println("*** DDL for Hive sample table is: " + sampleTblDDL);
       stmt.execute(sampleTblDDL);
     }
 
@@ -242,7 +237,7 @@ public class HBulkLoadClient
      bbRowIDs = (ByteBuffer)rowIDs;
      bbRows = (ByteBuffer)rows;
      numRows = bbRowIDs.getShort();
-     HTableClient htc = new HTableClient();
+     HTableClient htc = new HTableClient(HBaseClient.getConnection());
      long now = System.currentTimeMillis();
      for (short rowNum = 0; rowNum < numRows; rowNum++) 
      {
@@ -288,10 +283,10 @@ public class HBulkLoadClient
   private boolean createSnapshot( String tableName, String snapshotName)
       throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException
   {
-    HBaseAdmin admin = null;
+    Admin admin = null;
     try 
     {
-      admin = new HBaseAdmin(config);
+      admin = connection_.getAdmin();
       List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
       if (! lstSnaps.isEmpty())
       {
@@ -304,13 +299,11 @@ public class HBulkLoadClient
             }
         }
       }
-      admin.snapshot(snapshotName, tableName);
+      admin.snapshot(snapshotName, TableName.valueOf(tableName));
     }
     finally
     {
-      //close HBaseAdmin instance 
-      if (admin !=null)
-        admin.close();
+      if (admin != null) admin.close();
     }
     return true;
   }
@@ -318,22 +311,21 @@ public class HBulkLoadClient
   private boolean restoreSnapshot( String snapshotName, String tableName)
       throws IOException, RestoreSnapshotException
   {
-    HBaseAdmin admin = null;
+    Admin admin = null;
     try
     {
-      admin = new HBaseAdmin(config);
-      if (! admin.isTableDisabled(tableName))
-          admin.disableTable(tableName);
+      admin = connection_.getAdmin();
+      TableName table = TableName.valueOf(tableName);
+      if (! admin.isTableDisabled(table))
+          admin.disableTable(table);
       
       admin.restoreSnapshot(snapshotName);
   
-      admin.enableTable(tableName);
+      admin.enableTable(table);
     }
     finally
     {
-      //close HBaseAdmin instance 
-      if (admin != null) 
-        admin.close();
+       if (admin != null) admin.close();
     }
     return true;
   }
@@ -341,11 +333,11 @@ public class HBulkLoadClient
       throws IOException
   {
     
-    HBaseAdmin admin = null;
+    Admin admin = null;
     boolean snapshotExists = false;
     try
     {
-      admin = new HBaseAdmin(config);
+      admin = connection_.getAdmin();
       List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
       if (! lstSnaps.isEmpty())
       {
@@ -362,15 +354,14 @@ public class HBulkLoadClient
       }
       if (!snapshotExists)
         return true;
-      if (admin.isTableDisabled(tableName))
-          admin.enableTable(tableName);
+      TableName table = TableName.valueOf(tableName);
+      if (admin.isTableDisabled(table))
+          admin.enableTable(table);
       admin.deleteSnapshot(snapshotName);
     }
     finally 
     {
-      //close HBaseAdmin instance 
-      if (admin != null) 
-        admin.close();
+       if (admin != null) admin.close();
     }
     return true;
   }
@@ -378,7 +369,7 @@ public class HBulkLoadClient
   private void doSnapshotNBulkLoad(Path hFilePath, String tableName, HTable table, LoadIncrementalHFiles loader, boolean snapshot)
       throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException, RestoreSnapshotException
   {
-    HBaseAdmin admin = new HBaseAdmin(config);
+    Admin admin = connection_.getAdmin();
     String snapshotName= null;
     if (snapshot)
     {
@@ -411,6 +402,7 @@ public class HBulkLoadClient
         deleteSnapshot(snapshotName, tableName);
         if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot deleted: " + snapshotName);
       }
+      admin.close();
     }
     
   }

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/src/main/java/org/trafodion/sql/HTableClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HTableClient.java b/core/sql/src/main/java/org/trafodion/sql/HTableClient.java
index ec33084..2474663 100644
--- a/core/sql/src/main/java/org/trafodion/sql/HTableClient.java
+++ b/core/sql/src/main/java/org/trafodion/sql/HTableClient.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
 import org.apache.hadoop.hbase.client.transactional.RMInterface;
@@ -88,7 +88,7 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.NullComparator;
 
 import org.apache.hadoop.hbase.client.TableSnapshotScanner;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.fs.FileSystem;
@@ -104,7 +104,7 @@ public class HTableClient {
 	private boolean useTRex;
 	private boolean useTRexScanner;
 	private String tableName;
-
+        private static Connection connection;
 	private ResultScanner scanner = null;
         private ScanHelper scanHelper = null;
 	Result[] getResultSet = null;
@@ -133,7 +133,7 @@ public class HTableClient {
 	 class SnapshotScanHelper
 	 {
 	   Path snapRestorePath = null;
-	   HBaseAdmin admin  = null;
+	   Admin admin  = null;
 	   Configuration conf = null;
 	   SnapshotDescription snpDesc = null;
 	   String tmpLocation = null;
@@ -142,8 +142,7 @@ public class HTableClient {
 	   SnapshotScanHelper( Configuration cnfg , String tmpLoc, String snapName) 
 	       throws IOException
 	   {
-	     conf = cnfg;
-	     admin = new HBaseAdmin(conf);
+             conf = cnfg;
 	     tmpLocation = tmpLoc;
 	     setSnapshotDescription(snapName);
 	     Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
@@ -177,7 +176,10 @@ public class HTableClient {
 	   boolean snapshotExists() throws IOException
 	   {
 	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.snapshotExists() called. ");
-	     return !admin.listSnapshots(snpDesc.getName()).isEmpty();
+             Admin admin = connection.getAdmin();
+	     boolean retcode = !(admin.listSnapshots(snpDesc.getName()).isEmpty());
+             admin.close();
+             return retcode; 
 	   }
 
 	   void deleteSnapshot() throws IOException
@@ -185,13 +187,16 @@ public class HTableClient {
 	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot() called. ");
 	     if (snapshotExists())
 	     {
+               Admin admin = connection.getAdmin();
 	       admin.deleteSnapshot(snpDesc.getName());
+               admin.close();
 	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " deleted.");
 	     }
 	     else
 	     {
 	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " does not exist.");
 	     }
+    
 	   }
 	   void deleteRestorePath() throws IOException
 	   {
@@ -219,7 +224,7 @@ public class HTableClient {
 	       try
 	       {
                  ioExc = null;
-	         scanner = new TableSnapshotScanner(table.getConfiguration(), snapHelper.getSnapRestorePath(), snapHelper.getSnapshotName(), scan);
+	         scanner = new TableSnapshotScanner(connection.getConfiguration(), snapHelper.getSnapRestorePath(), snapHelper.getSnapshotName(), scan);
 	       }
 	       catch(IOException e )
 	       {
@@ -263,6 +268,10 @@ public class HTableClient {
 	   }
 	 }
 
+        public HTableClient(Connection connection) {
+           this.connection = connection;
+        }
+
 	class ScanHelper implements Callable {
             public Result[] call() throws IOException {
                 return scanner.next(numRowsCached);
@@ -344,7 +353,7 @@ public class HTableClient {
 		}
 	    }
 
-	    table = new RMInterface(tblName);
+	    table = new RMInterface(tblName, connection);
 	    if (logger.isDebugEnabled()) logger.debug("Exit HTableClient::init, useTRex: " + this.useTRex + ", useTRexScanner: "
 	              + this.useTRexScanner + ", table object: " + table);
 	    return true;
@@ -997,7 +1006,7 @@ public class HTableClient {
 	  }
 	  else
 	  {
-	    snapHelper = new SnapshotScanHelper(table.getConfiguration(), tmpLoc,snapName);
+	    snapHelper = new SnapshotScanHelper(connection.getConfiguration(), tmpLoc,snapName);
 
 	    if (logger.isTraceEnabled()) 
 	      logger.trace("[Snapshot Scan] HTableClient.startScan(). useSnapshotScan: " + useSnapshotScan + 
@@ -1686,12 +1695,12 @@ public class HTableClient {
               boolean cacheBlocks, int numCacheRows) 
                           throws IOException, Throwable {
 
-		    Configuration customConf = table.getConfiguration();
+		    Configuration customConf = connection.getConfiguration();
                     long rowCount = 0;
 
                     if (transID > 0) {
 		      TransactionalAggregationClient aggregationClient = 
-                          new TransactionalAggregationClient(customConf);
+                          new TransactionalAggregationClient(customConf, connection);
 		      Scan scan = new Scan();
 		      scan.addFamily(colFamily);
 		      scan.setCacheBlocks(false);
@@ -1771,11 +1780,12 @@ public class HTableClient {
            if (logger.isTraceEnabled()) logger.trace("Enter close() " + tableName);
            if (table != null) 
            {
+/*
               if (clearRegionCache)
               {
-                 HConnection connection = table.getConnection();
                  connection.clearRegionCache(tableName.getBytes());
               }
+*/
               table.close();
               table = null;
            }

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java b/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
index 3e6fdf0..28869fe 100644
--- a/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
+++ b/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
@@ -34,10 +34,10 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.TableSnapshotScanner;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -77,7 +77,7 @@ public class SequenceFileWriter {
 
     static Logger logger = Logger.getLogger(SequenceFileWriter.class.getName());
     Configuration conf = null;           // File system configuration
-    HBaseAdmin admin = null;
+    private Connection connection;
     
     SequenceFile.Writer writer = null;
 
@@ -92,6 +92,7 @@ public class SequenceFileWriter {
     SequenceFileWriter() throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
     {
       init("", "");
+      conf = connection.getConfiguration();
       conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
     }
     
@@ -308,19 +309,16 @@ public class SequenceFileWriter {
       , ServiceException
   {
     logger.debug("SequenceFileWriter.init(" + zkServers + ", " + zkPort + ") called.");
-    if (conf != null)		
-       return true;		
-    conf = HBaseConfiguration.create();		
-    HBaseAdmin.checkHBaseAvailable(conf);
+    connection = HBaseClient.getConnection();
     return true;
   }
   
   public boolean createSnapshot( String tableName, String snapshotName)
       throws IOException
   {
-      if (admin == null)
-        admin = new HBaseAdmin(conf);
-      admin.snapshot(snapshotName, tableName);
+      Admin admin = connection.getAdmin();
+      admin.snapshot(snapshotName, TableName.valueOf(tableName));
+      admin.close();
       if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Snapshot created: " + snapshotName);
     return true;
   }
@@ -328,10 +326,10 @@ public class SequenceFileWriter {
   public boolean verifySnapshot( String tableName, String snapshotName)
       throws IOException
   {
-      if (admin == null)
-        admin = new HBaseAdmin(conf);
+      Admin admin = connection.getAdmin();
       List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
-
+      try 
+      {
       for (SnapshotDescription snpd : lstSnaps) 
       {
         if (snpd.getName().compareTo(snapshotName) == 0 && 
@@ -341,6 +339,9 @@ public class SequenceFileWriter {
           return true;
         }
       }
+      } finally {
+        admin.close();
+      }
     return false;
   }
  
@@ -348,20 +349,11 @@ public class SequenceFileWriter {
       throws MasterNotRunningException, IOException, SnapshotCreationException, 
              InterruptedException, ZooKeeperConnectionException, ServiceException
   {
-      if (admin == null)
-        admin = new HBaseAdmin(conf);
+      Admin admin = connection.getAdmin();
       admin.deleteSnapshot(snapshotName);
+      admin.close();
       if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Snapshot deleted: " + snapshotName);
       return true;
   }
 
-  public boolean release()  throws IOException
-  {
-    if (admin != null)
-    {
-      admin.close();
-      admin = null;
-    }
-    return true;
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/7fe96649/core/sql/src/main/java/org/trafodion/sql/TrafRegionStats.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/TrafRegionStats.java b/core/sql/src/main/java/org/trafodion/sql/TrafRegionStats.java
index fe583a0..9cf4f67 100644
--- a/core/sql/src/main/java/org/trafodion/sql/TrafRegionStats.java
+++ b/core/sql/src/main/java/org/trafodion/sql/TrafRegionStats.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
 
 import java.io.IOException;
@@ -75,16 +75,8 @@ public class TrafRegionStats {
         return Collections.unmodifiableMap(sizeInfoMap);
     }
     
-    /**
-     * Computes size of each region for table and given column families.
-     * */
-    public TrafRegionStats(HTable table) throws IOException {
-        this(table, new HBaseAdmin(table.getConfiguration()));
-    }
-    
-    public TrafRegionStats (HTable table, HBaseAdmin admin) throws IOException {
+    public TrafRegionStats (HTable table, Admin admin) throws IOException {
         
-        try {
             if (!enabled(table.getConfiguration())) {
                 System.out.println("Region size calculation disabled.");
                 return;
@@ -131,14 +123,10 @@ public class TrafRegionStats {
 
                         sizeInfoMap.put(regionId, sizeInfo);
                         
-                        //                        System.out.println("RegionNameAsString " + regionLoad.getNameAsString());
                      }
                 }
             }
             
-        } finally {
-            admin.close();
-        }
     }
 }
 


Mime
View raw message