trafodion-commits mailing list archives

From dbirds...@apache.org
Subject [6/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co
Date Fri, 02 Oct 2015 16:16:35 GMT
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HBulkLoadClient.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/HBulkLoadClient.java b/core/sql/executor/HBulkLoadClient.java
deleted file mode 100644
index ff574d4..0000000
--- a/core/sql/executor/HBulkLoadClient.java
+++ /dev/null
@@ -1,533 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Iterator;
-import java.io.File;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.log4j.Logger;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.io.compress.*;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.regionserver.BloomType; 
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.trafodion.sql.HBaseAccess.HTableClient;
-//import org.trafodion.sql.HBaseAccess.HBaseClient;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.io.compress.CodecPool;
-import org.apache.hadoop.io.compress.Compressor;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.hbase.TableName;
-
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-
-import org.apache.hive.jdbc.HiveDriver;
-import java.sql.Connection;
-import java.sql.Statement;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.lang.ClassNotFoundException;
-
-public class HBulkLoadClient
-{
-  
-  private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
-  private final static FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");
-  private final static String BULKLOAD_STAGING_DIR = "hbase.bulkload.staging.dir";
-  private final static long MAX_HFILE_SIZE = 10737418240L; //10 GB
-  
-  public static int BLOCKSIZE = 64*1024;
-  public static String COMPRESSION = Compression.Algorithm.NONE.getName();
-  String lastError;
-  static Logger logger = Logger.getLogger(HBulkLoadClient.class.getName());
-  Configuration config;
-  HFile.Writer writer;
-  String hFileLocation;
-  String hFileName;
-  long maxHFileSize = MAX_HFILE_SIZE;
-  FileSystem fileSys = null;
-  String compression = COMPRESSION;
-  int blockSize = BLOCKSIZE;
-  DataBlockEncoding dataBlockEncoding = DataBlockEncoding.NONE;
-  FSDataOutputStream fsOut = null;
-
-  public HBulkLoadClient()
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient() called.");
-  }
-
-  public HBulkLoadClient(Configuration conf) throws IOException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient(...) called.");
-    config = conf;
-  }
-
-  public String getLastError() {
-    return lastError;
-  }
-
-  void setLastError(String err) {
-      lastError = err;
-  }
-  public boolean initHFileParams(String hFileLoc, String hFileNm, long userMaxSize /*in MBs*/, String tblName,
-                                 String sampleTblName, String sampleTblDDL) 
-  throws UnsupportedOperationException, IOException, SQLException, ClassNotFoundException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.initHFileParams() called.");
-    
-    hFileLocation = hFileLoc;
-    hFileName = hFileNm;
-    
-    HTable myHTable = new HTable(config, tblName);
-    HTableDescriptor hTableDesc = myHTable.getTableDescriptor();
-    HColumnDescriptor[] hColDescs = hTableDesc.getColumnFamilies();
-    if (hColDescs.length > 2 )  //2 column families: 1 for user data, 1 for transaction metadata
-    {
-      myHTable.close();
-      throw new UnsupportedOperationException ("only two families are supported.");
-    }
-    
-    compression= hColDescs[0].getCompression().getName();
-    blockSize= hColDescs[0].getBlocksize();
-    dataBlockEncoding = hColDescs[0].getDataBlockEncoding();
-    
-    if (userMaxSize == 0)
-    {
-      if (hTableDesc.getMaxFileSize()==-1)
-      {
-        maxHFileSize = MAX_HFILE_SIZE;
-      }
-      else
-      {
-        maxHFileSize = hTableDesc.getMaxFileSize();
-      }
-    }
-    else 
-      maxHFileSize = userMaxSize * 1024 *1024;  //maxSize is in MBs
-
-    myHTable.close();
-
-    if (sampleTblDDL.length() > 0)
-    {
-      Class.forName("org.apache.hive.jdbc.HiveDriver");
-      Connection conn = DriverManager.getConnection("jdbc:hive2://", "hive", "");
-      Statement stmt = conn.createStatement();
-      stmt.execute("drop table if exists " + sampleTblName);
-      //System.out.println("*** DDL for Hive sample table is: " + sampleTblDDL);
-      stmt.execute(sampleTblDDL);
-    }
-
-    return true;
-  }
-  public boolean doCreateHFile() throws IOException, URISyntaxException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doCreateHFile() called.");
-    
-    if (hFileLocation == null )
-      throw new NullPointerException("hFileLocation is not set");
-    if (hFileName == null )
-      throw new NullPointerException("hFileName is not set");
-    
-    closeHFile();
-    
-    if (fileSys == null)
-     fileSys = FileSystem.get(config); 
-
-    Path hfilePath = new Path(new Path(hFileLocation ), hFileName + "_" +  System.currentTimeMillis());
-    hfilePath = hfilePath.makeQualified(hfilePath.toUri(), null);
-
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.createHFile Path: " + hfilePath);
-
-    try
-    {
-      HFileContext hfileContext = new HFileContextBuilder()
-                                 .withBlockSize(blockSize)
-                                 .withCompression(Compression.getCompressionAlgorithmByName(compression))
-                                 .withDataBlockEncoding(dataBlockEncoding)
-                                 .build();
-
-      writer =    HFile.getWriterFactory(config, new CacheConfig(config))
-                     .withPath(fileSys, hfilePath)
-                     .withFileContext(hfileContext)
-                     .withComparator(KeyValue.COMPARATOR)
-                     .create();
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.createHFile Path: " + writer.getPath() + "Created");
-    }
-    catch (IOException e)
-    {
-       if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doCreateHFile Exception" + e.getMessage());
-       throw e;
-    }
-    return true;
-  }
-  
-  public boolean isNewFileNeeded() throws IOException
-  {
-    if (writer == null)
-      return true;
-    
-    if (fileSys == null)
-      fileSys = FileSystem.get(writer.getPath().toUri(),config);
-    
-    if (fileSys.getFileStatus(writer.getPath()).getLen() > maxHFileSize)
-     return true;
-
-    return false;
-  }
-
-  public boolean addToHFile(short rowIDLen, Object rowIDs,
-                Object rows) throws IOException, URISyntaxException
-  {
-     if (logger.isDebugEnabled()) logger.debug("Enter addToHFile() ");
-     Put put;
-    if (isNewFileNeeded())
-    {
-      doCreateHFile();
-    }
-     ByteBuffer bbRows, bbRowIDs;
-     short numCols, numRows;
-     short colNameLen;
-     int colValueLen;
-     byte[] colName, colValue, rowID;
-     short actRowIDLen;
-
-     bbRowIDs = (ByteBuffer)rowIDs;
-     bbRows = (ByteBuffer)rows;
-     numRows = bbRowIDs.getShort();
-     HTableClient htc = new HTableClient();
-     long now = System.currentTimeMillis();
-     for (short rowNum = 0; rowNum < numRows; rowNum++) 
-     {
-        byte rowIDSuffix  = bbRowIDs.get();
-        if (rowIDSuffix == '1')
-           actRowIDLen = (short)(rowIDLen+1);
-        else
-           actRowIDLen = rowIDLen;
-        rowID = new byte[actRowIDLen];
-        bbRowIDs.get(rowID, 0, actRowIDLen);
-        numCols = bbRows.getShort();
-        for (short colIndex = 0; colIndex < numCols; colIndex++)
-        {
-            colNameLen = bbRows.getShort();
-            colName = new byte[colNameLen];
-            bbRows.get(colName, 0, colNameLen);
-            colValueLen = bbRows.getInt();
-            colValue = new byte[colValueLen];
-            bbRows.get(colValue, 0, colValueLen);
-            KeyValue kv = new KeyValue(rowID,
-                                htc.getFamily(colName), 
-                                htc.getName(colName), 
-                                now,
-                                colValue);
-            writer.append(kv);
-        } 
-    }
-    if (logger.isDebugEnabled()) logger.debug("End addToHFile() ");
-       return true;
-  }
-
-  public boolean closeHFile() throws IOException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.closeHFile() called." + ((writer == null) ? "NULL" : "NOT NULL"));
-
-    if (writer == null)
-      return false;
-    
-    writer.close();
-    return true;
-  }
-
-  private boolean createSnapshot( String tableName, String snapshotName)
-  throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException
-  {
-    HBaseAdmin admin = null;
-    try 
-    {
-      admin = new HBaseAdmin(config);
-      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
-      if (! lstSnaps.isEmpty())
-      {
-        for (SnapshotDescription snpd : lstSnaps) 
-        {
-            if (snpd.getName().compareTo(snapshotName) == 0)
-            {
-              if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.createSnapshot() -- deleting: " + snapshotName + " : " + snpd.getName());
-              admin.deleteSnapshot(snapshotName);
-            }
-        }
-      }
-      admin.snapshot(snapshotName, tableName);
-   }
-    catch (Exception e)
-    {
-      //log exception and throw the exception again to the parent
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.createSnapshot() - Exception: " + e);
-      throw e;
-    }
-    finally
-    {
-      //close HBaseAdmin instance 
-      if (admin !=null)
-        admin.close();
-    }
-    return true;
-  }
-  
-  private boolean restoreSnapshot( String snapshotName, String tableName)
-  throws IOException, RestoreSnapshotException
-  {
-    HBaseAdmin admin = null;
-    try
-    {
-      admin = new HBaseAdmin(config);
-      if (! admin.isTableDisabled(tableName))
-          admin.disableTable(tableName);
-      
-      admin.restoreSnapshot(snapshotName);
-  
-      admin.enableTable(tableName);
-    }
-    catch (Exception e)
-    {
-      //log exception and throw the exception again to the parent
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.restoreSnapshot() - Exception: " + e);
-      throw e;
-    }
-    finally
-    {
-      //close HBaseAdmin instance 
-      if (admin != null) 
-        admin.close();
-    }
-
-    return true;
-  }
-  private boolean deleteSnapshot( String snapshotName, String tableName)
-      throws IOException
-  {
-    
-    HBaseAdmin admin = null;
-    boolean snapshotExists = false;
-    try
-    {
-      admin = new HBaseAdmin(config);
-      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
-      if (! lstSnaps.isEmpty())
-      {
-        for (SnapshotDescription snpd : lstSnaps) 
-        {
-          //System.out.println("here 1: " + snapshotName + snpd.getName());
-          if (snpd.getName().compareTo(snapshotName) == 0)
-          {
-            //System.out.println("deleting: " + snapshotName + " : " + snpd.getName());
-            snapshotExists = true;
-            break;
-          }
-        }
-      }
-      if (!snapshotExists)
-        return true;
-      if (admin.isTableDisabled(tableName))
-          admin.enableTable(tableName);
-      admin.deleteSnapshot(snapshotName);
-    }
-    catch (Exception e)
-    {
-      //log exception and throw the exception again to the parent
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.deleteSnapshot() - Exception: " + e);
-      throw e;
-    }
-    finally 
-    {
-      //close HBaseAdmin instance 
-      if (admin != null) 
-        admin.close();
-    }
-    return true;
-  }
-  
-  private void doSnapshotNBulkLoad(Path hFilePath, String tableName, HTable table, LoadIncrementalHFiles loader, boolean snapshot)
-  throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException, RestoreSnapshotException
-  {
-    HBaseAdmin admin = new HBaseAdmin(config);
-    String snapshotName= null;
-    if (snapshot)
-    {
-      snapshotName = tableName + "_SNAPSHOT";
-      createSnapshot(tableName, snapshotName);
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot created: " + snapshotName);
-    }
-    try
-    {
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - bulk load started ");
-      loader.doBulkLoad(hFilePath, table);
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - bulk load is done ");
-    }
-    catch (IOException e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - Exception: " + e.toString());
-      if (snapshot)
-      {
-        restoreSnapshot(snapshotName, tableName);
-        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot restored: " + snapshotName);
-        deleteSnapshot(snapshotName, tableName);
-        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot deleted: " + snapshotName);
-        throw e;
-      }
-    }
-    finally
-    {
-      if  (snapshot)
-      {
-        deleteSnapshot(snapshotName, tableName);
-        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot deleted: " + snapshotName);
-      }
-    }
-    
-  }
-  public boolean doBulkLoad(String prepLocation, String tableName, boolean quasiSecure, boolean snapshot) throws Exception
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - start");
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - Prep Location: " + prepLocation + 
-                                             ", Table Name:" + tableName + 
-                                             ", quasisecure : " + quasiSecure +
-                                             ", snapshot: " + snapshot);
-
-      
-    HTable table = new HTable(config, tableName);
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(config);    
-    Path prepPath = new Path(prepLocation );
-    prepPath = prepPath.makeQualified(prepPath.toUri(), null);
-    FileSystem prepFs = FileSystem.get(prepPath.toUri(),config);
-    
-    Path[] hFams = FileUtil.stat2Paths(prepFs.listStatus(prepPath));
-
-    if (quasiSecure)
-    {
-      throw new Exception("HBulkLoadClient.doBulkLoad() - cannot perform load. Trafodion on secure HBase mode is not implemented yet");
-    }
-    else
-    {
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfiles permissions");
-      for (Path hfam : hFams) 
-      {
-         Path[] hfiles = FileUtil.stat2Paths(prepFs.listStatus(hfam));
-         prepFs.setPermission(hfam,PERM_ALL_ACCESS );
-         for (Path hfile : hfiles)
-         {
-           if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfile permissions:" + hfile);
-           prepFs.setPermission(hfile,PERM_ALL_ACCESS);
-           
-         }
-         //create _tmp dir used as temp space for Hfile processing
-         FileSystem.mkdirs(prepFs, new Path(hfam,"_tmp"), PERM_ALL_ACCESS);
-      }
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - bulk load started. Loading directly from preparation directory");
-      doSnapshotNBulkLoad(prepPath,tableName,  table,  loader,  snapshot);
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - bulk load is done ");
-    }
-    return true;
-  }
-
-  public boolean bulkLoadCleanup(String location) throws Exception
-  {
-      Path dir = new Path(location );
-      dir = dir.makeQualified(dir.toUri(), null);
-      FileSystem fs = FileSystem.get(dir.toUri(),config);
-      fs.delete(dir, true);
-      
-      return true;
-
-  }
-  
-  public boolean release( ) throws IOException {
-    if (writer != null)
-    {
-       writer.close();
-       writer = null;
-    }
-    if (fileSys !=null)
-    {
-      fileSys.close();
-      fileSys = null;
-    }
-    if (config != null) 
-    {
-      config = null;
-    }
-    if (hFileLocation != null)
-    {
-      hFileLocation = null;
-    }
-    if (hFileName != null)
-    {
-      hFileName = null;
-    }
-
-    if (compression != null)
-    {
-      compression = null;
-    }
-    return true;
-  }
-}

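For orientation, the removed HBulkLoadClient wraps a simple create/append/close/load cycle around HFile.Writer and LoadIncrementalHFiles. The sketch below is a hypothetical stand-alone driver inferred from the method signatures and the ByteBuffer encoding visible in addToHFile() above; in Trafodion the class is driven from the SQL executor through JNI, and the table name, paths, and directory layout used here are illustrative assumptions only.

    // Hypothetical driver for the removed class; table name, paths and layout are assumptions.
    import java.nio.ByteBuffer;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.trafodion.sql.HBaseAccess.HBulkLoadClient;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBulkLoadClient client = new HBulkLoadClient(conf);

        // Stage HFiles under a per-family directory; userMaxSize 0 means "use the
        // table's MAX_FILESIZE", and empty sample-table arguments skip the Hive path.
        client.initHFileParams("/bulkload/T1/cf1", "hfile", 0, "TRAFODION.SCH.T1", "", "");

        // rowIDs buffer: short numRows, then per row a 1-byte suffix flag plus the row key.
        // rows buffer:   per row a short column count, then per column
        //                short nameLen, name ("family:qualifier"), int valueLen, value.
        short rowIDLen = 4;
        ByteBuffer rowIDs = ByteBuffer.allocate(64);
        rowIDs.putShort((short) 1).put((byte) '0').put("r001".getBytes());
        ByteBuffer rows = ByteBuffer.allocate(64);
        byte[] colName = "cf1:C1".getBytes();
        byte[] colValue = "v1".getBytes();
        rows.putShort((short) 1).putShort((short) colName.length).put(colName)
            .putInt(colValue.length).put(colValue);
        rowIDs.flip();
        rows.flip();

        client.addToHFile(rowIDLen, rowIDs, rows);   // creates the HFile on first use
        client.closeHFile();

        // Hand the staged files to HBase; "snapshot" guards the load with a table snapshot.
        client.doBulkLoad("/bulkload/T1", "TRAFODION.SCH.T1", false, true);
        client.bulkLoadCleanup("/bulkload/T1");
        client.release();
      }
    }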
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HTableClient.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/HTableClient.h b/core/sql/executor/HTableClient.h
deleted file mode 100644
index 1a0faa6..0000000
--- a/core/sql/executor/HTableClient.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include <jni.h>
-/* Header for class org_trafodion_sql_HBaseAccess_HTableClient */
-
-#ifndef _Included_org_trafodion_sql_HBaseAccess_HTableClient
-#define _Included_org_trafodion_sql_HBaseAccess_HTableClient
-#ifdef __cplusplus
-extern "C" {
-#endif
-#undef org_trafodion_sql_HBaseAccess_HTableClient_GET_ROW
-#define org_trafodion_sql_HBaseAccess_HTableClient_GET_ROW 1L
-#undef org_trafodion_sql_HBaseAccess_HTableClient_BATCH_GET
-#define org_trafodion_sql_HBaseAccess_HTableClient_BATCH_GET 2L
-#undef org_trafodion_sql_HBaseAccess_HTableClient_SCAN_FETCH
-#define org_trafodion_sql_HBaseAccess_HTableClient_SCAN_FETCH 3L
-/*
- * Class:     org_trafodion_sql_HBaseAccess_HTableClient
- * Method:    setResultInfo
- * Signature: (J[I[I[I[I[I[I[J[[B[[B[II)I
- */
-JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setResultInfo
-  (JNIEnv *, jobject, jlong, jintArray, jintArray, jintArray, jintArray, jintArray, jintArray, jlongArray, jobjectArray, jobjectArray, jintArray, jint, jint);
-
-/*
- * Class:     org_trafodion_sql_HBaseAccess_HTableClient
- * Method:    cleanup
- * Signature: (J)V
- */
-JNIEXPORT void JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_cleanup
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     org_trafodion_sql_HBaseAccess_HTableClient
- * Method:    setJavaObject
- * Signature: (J)I
- */
-JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setJavaObject
-  (JNIEnv *, jobject, jlong);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
-/* Header for class org_trafodion_sql_HBaseAccess_HTableClient_ScanHelper */
-
-#ifndef _Included_org_trafodion_sql_HBaseAccess_HTableClient_ScanHelper
-#define _Included_org_trafodion_sql_HBaseAccess_HTableClient_ScanHelper
-#ifdef __cplusplus
-extern "C" {
-#endif
-#ifdef __cplusplus
-}
-#endif
-#endif
-/* Header for class org_trafodion_sql_HBaseAccess_HTableClient_SnapshotScanHelper */
-
-#ifndef _Included_org_trafodion_sql_HBaseAccess_HTableClient_SnapshotScanHelper
-#define _Included_org_trafodion_sql_HBaseAccess_HTableClient_SnapshotScanHelper
-#ifdef __cplusplus
-extern "C" {
-#endif
-#ifdef __cplusplus
-}
-#endif
-#endif

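The file above is the machine-generated JNI header (javah, or javac -h on newer JDKs) for the native methods of org.trafodion.sql.HBaseAccess.HTableClient, so it is removed together with that class. For reference, its three exported signatures decode to Java-side declarations of roughly the shape sketched below; the parameter names mirror the arguments passed at the call sites in HTableClient.java further down, while the exact modifiers are an assumption.

    // Shape of the Java-side native declarations this generated header corresponds to.
    public class HTableClient {
      // (J[I[I[I[I[I[I[J[[B[[B[II)I : long, six int[], long[], two byte[][], int[], two int -> int
      native int setResultInfo(long jniObject,
              int[] kvValLen, int[] kvValOffset,
              int[] kvQualLen, int[] kvQualOffset,
              int[] kvFamLen, int[] kvFamOffset,
              long[] kvTimestamp, byte[][] kvBuffer,
              byte[][] rowIDs, int[] kvsPerRow,
              int cellsReturned, int rowsReturned);

      // (J)V
      native void cleanup(long jniObject);

      // (J)I
      native int setJavaObject(long jniObject);
    }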
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HTableClient.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/HTableClient.java b/core/sql/executor/HTableClient.java
deleted file mode 100644
index a1bb00f..0000000
--- a/core/sql/executor/HTableClient.java
+++ /dev/null
@@ -1,1334 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-import org.trafodion.sql.HBaseAccess.*;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.NavigableSet;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.nio.ByteBuffer;
-import java.nio.LongBuffer;
-import java.nio.ByteOrder;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
-import org.apache.hadoop.hbase.client.transactional.RMInterface;
-import org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient;
-import org.apache.hadoop.hbase.client.transactional.TransactionState;
-
-import org.apache.log4j.Logger;
-
-// H98 coprocessor needs
-import java.util.*;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.client.coprocessor.*;
-import org.apache.hadoop.hbase.coprocessor.*;
-import org.apache.hadoop.hbase.coprocessor.example.*;
-import org.apache.hadoop.hbase.ipc.*;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.*;
-import org.apache.hadoop.hbase.util.*;
-
-//import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
-import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
-import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
-
-// classes to do column value filtering
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.RandomRowFilter;
-
-import org.apache.hadoop.hbase.client.TableSnapshotScanner;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileUtil;
-import java.util.UUID;
-import java.security.InvalidParameterException;
-
-public class HTableClient {
-	private static final int GET_ROW = 1;
-	private static final int BATCH_GET = 2;
-	private static final int SCAN_FETCH = 3;
-	private boolean useTRex;
-	private boolean useTRexScanner;
-	private String tableName;
-
-	private ResultScanner scanner = null;
-        private ScanHelper scanHelper = null;
-	Result[] getResultSet = null;
-	String lastError;
-        RMInterface table = null;
-        ByteArrayList coprocAggrResult = null;
-        private boolean writeToWAL = false;
-	int numRowsCached = 1;
-	int numColsInScan = 0;
-	int[] kvValLen = null;
-	int[] kvValOffset = null;
-	int[] kvQualLen = null;
-	int[] kvQualOffset = null;
-	int[] kvFamLen = null;
-	int[] kvFamOffset = null;
-	long[] kvTimestamp = null;
-	byte[][] kvBuffer = null;
-	byte[][] rowIDs = null;
-	int[] kvsPerRow = null;
-        static ExecutorService executorService = null;
-        Future future = null;
-	boolean preFetch = false;
-	int fetchType = 0;
-	long jniObject = 0;
-	SnapshotScanHelper snapHelper = null;
-
-	 class SnapshotScanHelper
-	 {
-	   Path snapRestorePath = null;
-	   HBaseAdmin admin  = null;
-	   Configuration conf = null;
-	   SnapshotDescription snpDesc = null;
-	   String tmpLocation = null;
-	   FileSystem fs  = null;
-
-	   SnapshotScanHelper( Configuration cnfg , String tmpLoc, String snapName) 
-	       throws IOException
-	   {
-	     conf = cnfg;
-	     admin = new HBaseAdmin(conf);
-	     tmpLocation = tmpLoc;
-	     setSnapshotDescription(snapName);
-	     Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
-	     fs = rootDir.getFileSystem(conf);
-	     setSnapRestorePath();
-	   }
-
-	   String getTmpLocation()
-	   {
-	     return tmpLocation;
-	   }
-	   String getSnapshotName()
-	   {
-	     if (snpDesc == null)
-	       return null;
-	     return snpDesc.getName();
-	   }
-	   void setSnapRestorePath() throws IOException
-	   {
-	     String restoreDirStr = tmpLocation + getSnapshotDescription().getName(); ;
-	     snapRestorePath = new Path(restoreDirStr);
-	     snapRestorePath = snapRestorePath.makeQualified(fs.getUri(), snapRestorePath);
-	   }
-	   Path getSnapRestorePath() throws IOException
-	   {
-	     return snapRestorePath;
-	   }
-	   boolean snapshotExists() throws IOException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.snapshotExists() called. ");
-	     return !admin.listSnapshots(snpDesc.getName()).isEmpty();
-	   }
-	   void deleteSnapshot() throws IOException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot() called. ");
-	     if (snapshotExists())
-	     {
-	       admin.deleteSnapshot(snpDesc.getName());
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " deleted.");
-	     }
-	     else
-	     {
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " does not exist.");
-	     }
-	   }
-	   void deleteRestorePath() throws IOException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath() called. ");
-	     if (fs.exists(snapRestorePath))
-	     {
-	       fs.delete(snapRestorePath, true);
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath(). restorePath: " + snapRestorePath + " deleted.");
-	     }
-	     else
-	     {
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath(). restorePath: " + snapRestorePath  + " does not exist.");
-	     }
-	   }
-	   
-	   void createTableSnapshotScanner(int timeout, int slp, long nbre, Scan scan) throws InterruptedException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner() called. ");
-	     int xx=0;
-	     while (xx < timeout)
-	     {
-         xx++;
-	       scanner = null;
-	       try
-	       {
-	         scanner = new TableSnapshotScanner(table.getConfiguration(), snapHelper.getSnapRestorePath(), snapHelper.getSnapshotName(), scan);
-	       }
-	       catch(IOException e )
-	       {
-	         if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner(). espNumber: " + nbre  + 
-	             " snapshot " + snpDesc.getName() + " TableSnapshotScanner Exception :" + e);
-	         Thread.sleep(slp);
-	         continue;
-	       }
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner(). espNumber: " + 
-	           nbre + " snapshot " + snpDesc.getName() +  " TableSnapshotScanner Done - Scanner:" + scanner );
-	       break;
-	     }
-	   }
-	   void setSnapshotDescription( String snapName)
-	   {
-       if (snapName == null )
-         throw new InvalidParameterException ("snapshotName is null.");
-       
-	     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
-	     builder.setTable(Bytes.toString(table.getTableName()));
-	     builder.setName(snapName);
-	     builder.setType(SnapshotDescription.Type.FLUSH);
-	     snpDesc = builder.build();
-	   }
-	   SnapshotDescription getSnapshotDescription()
-	   {
-	     return snpDesc;
-	   }
-
-	   public void release() throws IOException
-	   {
-	     if (admin != null)
-	     {
-	       admin.close();
-	       admin = null;
-	     }
-	   }
-	 }
-
-	class ScanHelper implements Callable {
-            public Result[] call() throws Exception {
-                return scanner.next(numRowsCached);
-            }
-        }
-	 
-	static Logger logger = Logger.getLogger(HTableClient.class.getName());;
-
-        static public  byte[] getFamily(byte[] qc) {
-	   byte[] family = null;
-
-	   if (qc != null && qc.length > 0) {
-	       int pos = Bytes.indexOf(qc, (byte) ':');
-	       if (pos == -1) 
-	          family = Bytes.toBytes("cf1");
-	       else
-	          family = Arrays.copyOfRange(qc, 0, pos);
-           }	
-	   return family;
-	}
-
-        static public byte[] getName(byte[] qc) {
-	   byte[] name = null;
-
-	   if (qc != null && qc.length > 0) {
-	      int pos = Bytes.indexOf(qc, (byte) ':');
-	      if (pos == -1) 
-	         name = qc;
-	      else
-	         name = Arrays.copyOfRange(qc, pos + 1, qc.length);
-	   }	
-	   return name;
-	}
-
-	public boolean setWriteBufferSize(long writeBufferSize) throws IOException {
-		if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::setWriteBufferSize, size  : " + writeBufferSize);
-	    table.setWriteBufferSize(writeBufferSize);
-	    return true;
-	  }
-	 public long getWriteBufferSize() {
-		 if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::getWriteBufferSize, size return : " + table.getWriteBufferSize());
-		 return table.getWriteBufferSize();
-	 }
-	public boolean setWriteToWAL(boolean v) {
-		if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::setWriteToWAL, value: " + v);
-	    writeToWAL = v;
-	    return true;
-	  }
- 
-	public boolean init(String tblName,
-              boolean useTRex) throws IOException 
-        {
-	    if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::init, tableName: " + tblName);
-	    this.useTRex = useTRex;
-	    tableName = tblName;
-	    
-	    if ( !this.useTRex ) {
-		this.useTRexScanner = false;
-	    }
-	    else {
-
-		// If the parameter useTRex is false, then do not go thru this logic
-
-		String useTransactions = System.getenv("USE_TRANSACTIONS");
-		if (useTransactions != null) {
-		    int lv_useTransactions = (Integer.parseInt(useTransactions));
-		    if (lv_useTransactions == 0) {
-			this.useTRex = false;
-		    }
-		}
-	    
-		this.useTRexScanner = true;
-		String useTransactionsScanner = System.getenv("USE_TRANSACTIONS_SCANNER");
-		if (useTransactionsScanner != null) {
-		    int lv_useTransactionsScanner = (Integer.parseInt(useTransactionsScanner));
-		    if (lv_useTransactionsScanner == 0) {
-			this.useTRexScanner = false;
-		    }
-		}
-	    }
-
-	    table = new RMInterface(tblName);
-	    if (logger.isDebugEnabled()) logger.debug("Exit HTableClient::init, table object: " + table);
-	    return true;
-	}
-
-	public String getLastError() {
-		String ret = lastError;
-		lastError = null;
-		return ret;
-	}
-
-	void setLastError(String err) {
-		lastError = err;
-	}
-
-	String getTableName() {
-		return tableName;
-	}
-
-	String getHTableName() {
-		if (table == null)
-			return null;
-		else
-			return new String(table.getTableName());
-	}
-
-	void resetAutoFlush() {
-		table.setAutoFlush(true, true);
-	}
-
-	public boolean startScan(long transID, byte[] startRow, byte[] stopRow,
-                                 Object[]  columns, long timestamp,
-                                 boolean cacheBlocks, int numCacheRows,
-                                 Object[] colNamesToFilter, 
-                                 Object[] compareOpList, 
-                                 Object[] colValuesToCompare,
-                                 float samplePercent,
-                                 boolean inPreFetch,
-                                 boolean useSnapshotScan,
-                                 int snapTimeout,
-                                 String snapName,
-                                 String tmpLoc,
-                                 int espNum,
-                                 int versions)
-	        throws IOException, Exception {
-	  if (logger.isTraceEnabled()) logger.trace("Enter startScan() " + tableName + " txid: " + transID+ " CacheBlocks: " + cacheBlocks + " numCacheRows: " + numCacheRows + " Bulkread: " + useSnapshotScan);
-
-	  Scan scan;
-
-	  if (startRow != null && startRow.toString() == "")
-	    startRow = null;
-	  if (stopRow != null && stopRow.toString() == "")
-	    stopRow = null;
-
-	  if (startRow != null && stopRow != null)
-	    scan = new Scan(startRow, stopRow);
-	  else
-	    scan = new Scan();
-
-          if (versions != 0)
-            {
-              if (versions == -1)
-                scan.setMaxVersions();
-              else if (versions == -2)
-                {
-                  scan.setMaxVersions();
-                  scan.setRaw(true);
-                  columns = null;
-                }
-              else if (versions > 0)
-               {
-                 scan.setMaxVersions(versions);
-               }
-           }
-
-          if (cacheBlocks == true) {
-              scan.setCacheBlocks(true);
-              // Disable block cache for full table scan
-              if (startRow == null && stopRow == null)
-                  scan.setCacheBlocks(false);
-          }
-	  else
-              scan.setCacheBlocks(false);
-          
-	  scan.setCaching(numCacheRows);
-	  numRowsCached = numCacheRows;
-	  if (columns != null) {
-	    numColsInScan = columns.length;
-	    for (int i = 0; i < columns.length ; i++) {
-	      byte[] col = (byte[])columns[i];
-	      scan.addColumn(getFamily(col), getName(col));
-	    }
-	  }
-	  else
-	    numColsInScan = 0;
-	  if (colNamesToFilter != null) {
-	    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL);
-
-	    for (int i = 0; i < colNamesToFilter.length; i++) {
-	      byte[] colName = (byte[])colNamesToFilter[i];
-	      byte[] coByte = (byte[])compareOpList[i];
-	      byte[] colVal = (byte[])colValuesToCompare[i];
-
-	      if ((coByte == null) || (colVal == null)) {
-	        return false;
-	      }
-
-	      String coStr = new String(coByte);
-	      CompareOp co = CompareOp.valueOf(coStr);
-
-	      SingleColumnValueFilter filter1 = 
-	          new SingleColumnValueFilter(getFamily(colName), getName(colName), 
-	              co, colVal);
-	      list.addFilter(filter1);
-	    }
-
-	    if (samplePercent > 0.0f)
-	      list.addFilter(new RandomRowFilter(samplePercent));
-	    scan.setFilter(list);
-	  } else if (samplePercent > 0.0f) {
-	    scan.setFilter(new RandomRowFilter(samplePercent));
-	  }
-
-	  if (!useSnapshotScan || transID != 0)
-	  {
-	    if (useTRexScanner && (transID != 0)) {
-	      scanner = table.getScanner(transID, scan);
-	    } else {
-	      scanner = table.getScanner(scan);
-	    }
-	    if (logger.isTraceEnabled()) logger.trace("startScan(). After getScanner. Scanner: " + scanner);
-	  }
-	  else
-	  {
-	    snapHelper = new SnapshotScanHelper(table.getConfiguration(), tmpLoc,snapName);
-
-	    if (logger.isTraceEnabled()) 
-	      logger.trace("[Snapshot Scan] HTableClient.startScan(). useSnapshotScan: " + useSnapshotScan + 
-	                   " espNumber: " + espNum + 
-	                   " tmpLoc: " + snapHelper.getTmpLocation() + 
-	                   " snapshot name: " + snapHelper.getSnapshotName());
-	    
-	    if (!snapHelper.snapshotExists())
-	      throw new Exception ("Snapshot " + snapHelper.getSnapshotName() + " does not exist.");
-
-	    snapHelper.createTableSnapshotScanner(snapTimeout, 5, espNum, scan);
-	    if (scanner==null)
-	      throw new Exception("Cannot create Table Snapshot Scanner");
-	  }
-    
-          if (useSnapshotScan)
-             preFetch = false;
-          else
-	     preFetch = inPreFetch;
-	  if (preFetch)
-	  {
-	    scanHelper = new ScanHelper(); 
-            future = executorService.submit(scanHelper);
-	  }
-          fetchType = SCAN_FETCH;
-	  if (logger.isTraceEnabled()) logger.trace("Exit startScan().");
-	  return true;
-	}
-
-	public int  startGet(long transID, byte[] rowID, 
-                     Object[] columns,
-		     long timestamp) throws IOException {
-
-	    if (logger.isTraceEnabled()) logger.trace("Enter startGet(" + tableName + 
-			     " #cols: " + ((columns == null) ? 0:columns.length ) +
-			     " rowID: " + new String(rowID));
-		fetchType = GET_ROW;
-		Get get = new Get(rowID);
-		if (columns != null)
-		{
-			for (int i = 0; i < columns.length; i++) {
-				byte[] col = (byte[]) columns[i];
-				get.addColumn(getFamily(col), getName(col));
-			}
-			numColsInScan = columns.length;
-		}
-		else
-			numColsInScan = 0;
-			
-		Result getResult;
-		if (useTRex && (transID != 0)) {
-			getResult = table.get(transID, get);
-		} else {
-			getResult = table.get(get);
-		}
-		if (getResult == null
-                    || getResult.isEmpty()) {
-                        setJavaObject(jniObject);
-			return 0;
-		}
-		if (logger.isTraceEnabled()) logger.trace("startGet, result: " + getResult);
-		pushRowsToJni(getResult);
-		return 1;
-
-	}
-
-	// The TransactionalTable class is missing the batch get operation,
-	// so work around it.
-	private Result[] batchGet(long transactionID, List<Get> gets)
-			throws IOException {
-		if (logger.isTraceEnabled()) logger.trace("Enter batchGet(multi-row) " + tableName);
-		Result [] results = new Result[gets.size()];
-		int i=0;
-		for (Get g : gets) {
-			Result r = table.get(transactionID, g);
-			results[i++] = r;
-		}
-		return results;
-	}
-
-	public int startGet(long transID, Object[] rows,
-			Object[] columns, long timestamp)
-                        throws IOException {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter startGet(multi-row) " + tableName);
-
-		List<Get> listOfGets = new ArrayList<Get>();
-		for (int i = 0; i < rows.length; i++) {
-			byte[] rowID = (byte[])rows[i]; 
-			Get get = new Get(rowID);
-			listOfGets.add(get);
-			if (columns != null)
-			{
-				for (int j = 0; j < columns.length; j++ ) {
-					byte[] col = (byte[])columns[j];
-					get.addColumn(getFamily(col), getName(col));
-				}
-			}
-		}
-		if (columns != null)
-			numColsInScan = columns.length;
-		else
-			numColsInScan = 0;
-		if (useTRex && (transID != 0)) {
-			getResultSet = batchGet(transID, listOfGets);
-                        fetchType = GET_ROW; 
-		} else {
-			getResultSet = table.get(listOfGets);
-			fetchType = BATCH_GET;
-		}
-		if (getResultSet != null && getResultSet.length > 0) {
-                	 pushRowsToJni(getResultSet);
-			return getResultSet.length;
-		}
-		else {
-			setJavaObject(jniObject);
-			return 0;
-		}
-	}
-
-	public int getRows(long transID, short rowIDLen, Object rowIDs,
-			Object[] columns)
-                        throws IOException {
-            
-		if (logger.isTraceEnabled()) logger.trace("Enter getRows " + tableName);
-
-		ByteBuffer bbRowIDs = (ByteBuffer)rowIDs;
-		List<Get> listOfGets = new ArrayList<Get>();
-		short numRows = bbRowIDs.getShort();
-		short actRowIDLen ;
-		byte rowIDSuffix;
-		byte[] rowID;
-
-		for (int i = 0; i < numRows; i++) {
-                        rowIDSuffix  = bbRowIDs.get();
-                        if (rowIDSuffix == '1')
-		           actRowIDLen = (short)(rowIDLen+1);
-                        else
-                           actRowIDLen = rowIDLen; 	
-			rowID = new byte[actRowIDLen];
-			bbRowIDs.get(rowID, 0, actRowIDLen);
-			Get get = new Get(rowID);
-			listOfGets.add(get);
-			if (columns != null) {
-				for (int j = 0; j < columns.length; j++ ) {
-					byte[] col = (byte[])columns[j];
-					get.addColumn(getFamily(col), getName(col));
-				}
-			}
-		}
-		if (columns != null)
-			numColsInScan = columns.length;
-		else
-			numColsInScan = 0;
-		if (useTRex && (transID != 0)) {
-			getResultSet = batchGet(transID, listOfGets);
-                        fetchType = GET_ROW; 
-		} else {
-			getResultSet = table.get(listOfGets);
-			fetchType = BATCH_GET;
-		}
-		if (getResultSet.length != numRows)
-                   throw new IOException("Number of rows returned is not equal to requested number of rows");
- 		pushRowsToJni(getResultSet);
-		return getResultSet.length;
-	}
-
-	public int fetchRows() throws IOException, 
-			InterruptedException, ExecutionException {
-		int rowsReturned = 0;
-
-		if (logger.isTraceEnabled()) logger.trace("Enter fetchRows(). Table: " + tableName);
-		if (getResultSet != null)
-		{
-			rowsReturned = pushRowsToJni(getResultSet);
-			getResultSet = null;
-			return rowsReturned;
-		}
-		else
-		{
-			if (scanner == null) {
-				String err = "  fetchRows() called before scanOpen().";
-				logger.error(err);
-				setLastError(err);
-				return -1;
-			}
-			Result[] result = null;
-			if (preFetch)
-			{
-				result = (Result[])future.get();
-				rowsReturned = pushRowsToJni(result);
-				future = null;
-				if ((rowsReturned <= 0 || rowsReturned < numRowsCached))
-					return rowsReturned;
-                                future = executorService.submit(scanHelper);
-			}
-			else
-			{
-				result = scanner.next(numRowsCached);
-				rowsReturned = pushRowsToJni(result);
-			}
-			return rowsReturned;
-		}
-	}
-
-	protected int pushRowsToJni(Result[] result) 
-			throws IOException {
-		if (result == null || result.length == 0)
-			return 0; 
-		int rowsReturned = result.length;
-		int numTotalCells = 0;
-		if (numColsInScan == 0)
-		{
-			for (int i = 0; i < result.length; i++) {	
-				numTotalCells += result[i].size();
-			}
-		}
-		else
-		// There can be maximum of 2 versions per kv
-		// So, allocate place holder to keep cell info
-		// for that many KVs
-			numTotalCells = 2 * rowsReturned * numColsInScan;
-		int numColsReturned;
-		Cell[] kvList;
-		Cell kv;
-
-		if (kvValLen == null ||
-	 		(kvValLen != null && numTotalCells > kvValLen.length))
-		{
-			kvValLen = new int[numTotalCells];
-			kvValOffset = new int[numTotalCells];
-			kvQualLen = new int[numTotalCells];
-			kvQualOffset = new int[numTotalCells];
-			kvFamLen = new int[numTotalCells];
-			kvFamOffset = new int[numTotalCells];
-			kvTimestamp = new long[numTotalCells];
-			kvBuffer = new byte[numTotalCells][];
-		}
-               
-		if (rowIDs == null || (rowIDs != null &&
-				rowsReturned > rowIDs.length))
-		{
-			rowIDs = new byte[rowsReturned][];
-			kvsPerRow = new int[rowsReturned];
-		}
-		int cellNum = 0;
-		boolean colFound = false;
-		for (int rowNum = 0; rowNum < rowsReturned ; rowNum++)
-		{
-			rowIDs[rowNum] = result[rowNum].getRow();
-			kvList = result[rowNum].rawCells();
-			numColsReturned = kvList.length;
-			if ((cellNum + numColsReturned) > numTotalCells)
-				throw new IOException("Insufficient cell array pre-allocated");
-			kvsPerRow[rowNum] = numColsReturned;
-			for (int colNum = 0 ; colNum < numColsReturned ; colNum++, cellNum++)
-			{ 
-				kv = kvList[colNum];
-				kvValLen[cellNum] = kv.getValueLength();
-				kvValOffset[cellNum] = kv.getValueOffset();
-				kvQualLen[cellNum] = kv.getQualifierLength();
-				kvQualOffset[cellNum] = kv.getQualifierOffset();
-				kvFamLen[cellNum] = kv.getFamilyLength();
-				kvFamOffset[cellNum] = kv.getFamilyOffset();
-				kvTimestamp[cellNum] = kv.getTimestamp();
-				kvBuffer[cellNum] = kv.getValueArray();
-				colFound = true;
-			}
-		}
-		int cellsReturned;
-		if (colFound)
-                	cellsReturned = cellNum++;
-		else
-			cellsReturned = 0;
-		if (cellsReturned == 0)
-			setResultInfo(jniObject, null, null,
-				null, null, null, null,
-				null, null, rowIDs, kvsPerRow, cellsReturned, rowsReturned);
-		else 
-			setResultInfo(jniObject, kvValLen, kvValOffset,
-				kvQualLen, kvQualOffset, kvFamLen, kvFamOffset,
-				kvTimestamp, kvBuffer, rowIDs, kvsPerRow, cellsReturned, rowsReturned);
-		return rowsReturned;	
-	}		
-	
-	protected int pushRowsToJni(Result result) 
-			throws IOException {
-		int rowsReturned = 1;
-		int numTotalCells;
-		if (numColsInScan == 0)
-			numTotalCells = result.size();
-		else
-		// There can be maximum of 2 versions per kv
-		// So, allocate place holder to keep cell info
-		// for that many KVs
-			numTotalCells = 2 * rowsReturned * numColsInScan;
-		int numColsReturned;
-		Cell[] kvList;
-		Cell kv;
-
-		if (kvValLen == null ||
-	 		(kvValLen != null && numTotalCells > kvValLen.length))
-		{
-			kvValLen = new int[numTotalCells];
-			kvValOffset = new int[numTotalCells];
-			kvQualLen = new int[numTotalCells];
-			kvQualOffset = new int[numTotalCells];
-			kvFamLen = new int[numTotalCells];
-			kvFamOffset = new int[numTotalCells];
-			kvTimestamp = new long[numTotalCells];
-			kvBuffer = new byte[numTotalCells][];
-		}
-		if (rowIDs == null)
-		{
-			rowIDs = new byte[rowsReturned][];
-			kvsPerRow = new int[rowsReturned];
-		}
-		kvList = result.rawCells();
- 		if (kvList == null)
-			numColsReturned = 0; 
-		else
-			numColsReturned = kvList.length;
-		if ((numColsReturned) > numTotalCells)
-			throw new IOException("Insufficient cell array pre-allocated");
- 		rowIDs[0] = result.getRow();
-		kvsPerRow[0] = numColsReturned;
-		for (int colNum = 0 ; colNum < numColsReturned ; colNum++)
-		{ 
-			kv = kvList[colNum];
-			kvValLen[colNum] = kv.getValueLength();
-			kvValOffset[colNum] = kv.getValueOffset();
-			kvQualLen[colNum] = kv.getQualifierLength();
-			kvQualOffset[colNum] = kv.getQualifierOffset();
-			kvFamLen[colNum] = kv.getFamilyLength();
-			kvFamOffset[colNum] = kv.getFamilyOffset();
-			kvTimestamp[colNum] = kv.getTimestamp();
-			kvBuffer[colNum] = kv.getValueArray();
-		}
-		if (numColsReturned == 0)
-			setResultInfo(jniObject, null, null,
-				null, null, null, null,
-				null, null, rowIDs, kvsPerRow, numColsReturned, rowsReturned);
-		else
-			setResultInfo(jniObject, kvValLen, kvValOffset,
-				kvQualLen, kvQualOffset, kvFamLen, kvFamOffset,
-				kvTimestamp, kvBuffer, rowIDs, kvsPerRow, numColsReturned, rowsReturned);
-		return rowsReturned;	
-	}		
-	
-	public boolean deleteRow(final long transID, byte[] rowID, 
-				 Object[] columns,
-				 long timestamp,
-                                 boolean asyncOperation) throws IOException {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter deleteRow(" + new String(rowID) + ", "
-			     + timestamp + ") " + tableName);
-
-		final Delete del;
-		if (timestamp == -1)
-			del = new Delete(rowID);
-		else
-			del = new Delete(rowID, timestamp);
-
-		if (columns != null) {
-			for (int i = 0; i < columns.length ; i++) {
-				byte[] col = (byte[]) columns[i];
-				del.deleteColumns(getFamily(col), getName(col));
-			}
-		}
-               	if (asyncOperation) {
-			future = executorService.submit(new Callable() {
- 				public Object call() throws Exception {
-					boolean res = true;
-					if (useTRex && (transID != 0)) 
-				           table.delete(transID, del);
-				        else
-				           table.delete(del);
-				        return new Boolean(res);
-				}
-			});
-			return true;
-		}
-		else {
-	          	if (useTRex && (transID != 0)) 
-				table.delete(transID, del);
-			else
-				table.delete(del);
-		}
-		if (logger.isTraceEnabled()) logger.trace("Exit deleteRow");
-		return true;
-	}
-
-	public boolean deleteRows(final long transID, short rowIDLen, Object rowIDs,
-		      long timestamp,
-                      boolean asyncOperation) throws IOException {
-
-	        if (logger.isTraceEnabled()) logger.trace("Enter deleteRows() " + tableName);
-
-		final List<Delete> listOfDeletes = new ArrayList<Delete>();
-		listOfDeletes.clear();
-		ByteBuffer bbRowIDs = (ByteBuffer)rowIDs;
-		short numRows = bbRowIDs.getShort();
-                byte[] rowID;		
-		byte rowIDSuffix;
-		short actRowIDLen;
-       
-		for (short rowNum = 0; rowNum < numRows; rowNum++) {
-                        rowIDSuffix  = bbRowIDs.get();
-                        if (rowIDSuffix == '1')
-		           actRowIDLen = (short)(rowIDLen+1);
-                        else
-                           actRowIDLen = rowIDLen; 	
-			rowID = new byte[actRowIDLen];
-			bbRowIDs.get(rowID, 0, actRowIDLen);
-
-			Delete del;
-			if (timestamp == -1)
-			    del = new Delete(rowID);
-			else
-			    del = new Delete(rowID, timestamp);
-			listOfDeletes.add(del);
-		}
-                if (asyncOperation) {
-                        future = executorService.submit(new Callable() {
-                                public Object call() throws Exception {
-                                    boolean res = true;
-				   if (useTRex && (transID != 0)) 
-				      table.delete(transID, listOfDeletes);
-				   else
-				      table.delete(listOfDeletes);
-				   return new Boolean(res);
-				}
-			});
-			return true;
-		}
-		else {
-			if (useTRex && (transID != 0)) 
-		    	   table.delete(transID, listOfDeletes);
-			else
-		  	   table.delete(listOfDeletes);
-		}
-		if (logger.isTraceEnabled()) logger.trace("Exit deleteRows");
-		return true;
-	}
-
-         public byte[] intToByteArray(int value) {
-	     return new byte[] {
-		 (byte)(value >>> 24),
-		 (byte)(value >>> 16),
-		 (byte)(value >>> 8),
-		 (byte)value};
-	 }
-    
-	public boolean checkAndDeleteRow(long transID, byte[] rowID, 
-					 byte[] columnToCheck, byte[] colValToCheck,
-					 long timestamp) throws IOException {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter checkAndDeleteRow(" + new String(rowID) + ", "
-			     + new String(columnToCheck) + ", " + new String(colValToCheck) + ", " + timestamp + ") " + tableName);
-
-			Delete del;
-			if (timestamp == -1)
-				del = new Delete(rowID);
-			else
-				del = new Delete(rowID, timestamp);
-
-			byte[] family = null;
-			byte[] qualifier = null;
-
-			if (columnToCheck.length > 0) {
-				family = getFamily(columnToCheck);
-				qualifier = getName(columnToCheck);
-			}
-			
-			boolean res;
-			if (useTRex && (transID != 0)) {
-			    res = table.checkAndDelete(transID, rowID, family, qualifier, colValToCheck, del);
-			} else {
-			    res = table.checkAndDelete(rowID, family, qualifier, colValToCheck, del);
-			}
-
-			if (res == false)
-			    return false;
-		return true;
-	}
-
-	public boolean putRow(final long transID, final byte[] rowID, Object row,
-		byte[] columnToCheck, final byte[] colValToCheck,
-		final boolean checkAndPut, boolean asyncOperation) throws IOException, InterruptedException, 
-                          ExecutionException 
-	{
-		if (logger.isTraceEnabled()) logger.trace("Enter putRow() " + tableName);
-
-	 	final Put put;
-		ByteBuffer bb;
-		short numCols;
-		short colNameLen;
-                int colValueLen;
-		byte[] family = null;
-		byte[] qualifier = null;
-		byte[] colName, colValue;
-
-		bb = (ByteBuffer)row;
-		put = new Put(rowID);
-		numCols = bb.getShort();
-		for (short colIndex = 0; colIndex < numCols; colIndex++)
-		{
-			colNameLen = bb.getShort();
-			colName = new byte[colNameLen];
-			bb.get(colName, 0, colNameLen);
-			colValueLen = bb.getInt();	
-			colValue = new byte[colValueLen];
-			bb.get(colValue, 0, colValueLen);
-			put.add(getFamily(colName), getName(colName), colValue); 
-			if (checkAndPut && colIndex == 0) {
-				family = getFamily(colName);
-				qualifier = getName(colName);
-			} 
-		}
-		if (columnToCheck != null && columnToCheck.length > 0) {
-			family = getFamily(columnToCheck);
-			qualifier = getName(columnToCheck);
-		}
-		final byte[] family1 = family;
-		final byte[] qualifier1 = qualifier;
-		if (asyncOperation) {
-			future = executorService.submit(new Callable() {
-				public Object call() throws Exception {
-					boolean res = true;
-
-					if (checkAndPut) {
-		    				if (useTRex && (transID != 0)) 
-							res = table.checkAndPut(transID, rowID, 
-								family1, qualifier1, colValToCheck, put);
-		    				else 
-							res = table.checkAndPut(rowID, 
-								family1, qualifier1, colValToCheck, put);
-					}
-					else {
-		    				if (useTRex && (transID != 0)) 
-							table.put(transID, put);
-		    				else 
-							table.put(put);
-					}
-					return new Boolean(res);
-				}
-			});
-			return true;
-		} else {
-		 	boolean result = true;
-			if (checkAndPut) {
-		    		if (useTRex && (transID != 0)) 
-					result = table.checkAndPut(transID, rowID, 
-						family1, qualifier1, colValToCheck, put);
-		   		else 
-					result = table.checkAndPut(rowID, 
-						family1, qualifier1, colValToCheck, put);
-			}
-			else {
-		    		if (useTRex && (transID != 0)) 
-					table.put(transID, put);
-		    		else 
-					table.put(put);
-			}
-			return result;
-		}	
-	}
-
-	public boolean insertRow(long transID, byte[] rowID, 
-                         Object row, 
-			 long timestamp,
-                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
-		return putRow(transID, rowID, row, null, null, 
-				false, asyncOperation);
-	}
-
-	public boolean putRows(final long transID, short rowIDLen, Object rowIDs, 
-                       Object rows,
-                       long timestamp, boolean autoFlush, boolean asyncOperation)
-			throws IOException, InterruptedException, ExecutionException  {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter putRows() " + tableName);
-
-		Put put;
-		ByteBuffer bbRows, bbRowIDs;
-		short numCols, numRows;
-		short colNameLen;
-                int colValueLen;
-		byte[] colName, colValue, rowID;
-		byte rowIDSuffix;
-                short actRowIDLen;
-		bbRowIDs = (ByteBuffer)rowIDs;
-		bbRows = (ByteBuffer)rows;
-
-		final List<Put> listOfPuts = new ArrayList<Put>();
-		numRows = bbRowIDs.getShort();
-		
-		for (short rowNum = 0; rowNum < numRows; rowNum++) {
-                        rowIDSuffix  = bbRowIDs.get();
-                        if (rowIDSuffix == '1')
-		           actRowIDLen = (short)(rowIDLen+1);
-                        else
-                           actRowIDLen = rowIDLen; 	
-			rowID = new byte[actRowIDLen];
-			bbRowIDs.get(rowID, 0, actRowIDLen);
-			put = new Put(rowID);
-			numCols = bbRows.getShort();
-			for (short colIndex = 0; colIndex < numCols; colIndex++)
-			{
-				colNameLen = bbRows.getShort();
-				colName = new byte[colNameLen];
-				bbRows.get(colName, 0, colNameLen);
-				colValueLen = bbRows.getInt();	
-				colValue = new byte[colValueLen];
-				bbRows.get(colValue, 0, colValueLen);
-				put.add(getFamily(colName), getName(colName), colValue); 
-			}
-			if (writeToWAL)  
-				put.setWriteToWAL(writeToWAL);
-			listOfPuts.add(put);
-		}
-		if (autoFlush == false)
-			table.setAutoFlush(false, true);
-		if (asyncOperation) {
-			future = executorService.submit(new Callable() {
-				public Object call() throws Exception {
-					boolean res = true;
-					if (useTRex && (transID != 0)) 
-						table.put(transID, listOfPuts);
-					else 
-						table.put(listOfPuts);
-					return new Boolean(res);
-				}
-			});
-		}
-		else {
-			if (useTRex && (transID != 0)) 
-				table.put(transID, listOfPuts);
-			else 
-				table.put(listOfPuts);
-		}
-		return true;
-	} 
-
-	public boolean completeAsyncOperation(int timeout, boolean resultArray[]) 
-			throws InterruptedException, ExecutionException
-	{
-		if (timeout == -1) {
-			if (! future.isDone()) 
-				return false;
-		}
-	 	try {			
-			Boolean result = (Boolean)future.get(timeout, TimeUnit.MILLISECONDS);
-                        // Need to enhance to return the result 
-                        // for each Put object
-			for (int i = 0; i < resultArray.length; i++)
-			    resultArray[i] = result.booleanValue();
-			future = null;
- 		} catch(TimeoutException te) {
-			return false;
-		} 
-		return true;
-	}
-
-	public boolean checkAndInsertRow(long transID, byte[] rowID, 
-                         Object row, 
-			 long timestamp,
-                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException  {
-		return putRow(transID, rowID, row, null, null, 
-				true, asyncOperation);
-	}
-
-	public boolean checkAndUpdateRow(long transID, byte[] rowID, 
-             Object columns, byte[] columnToCheck, byte[] colValToCheck,
-             long timestamp, boolean asyncOperation) throws IOException, InterruptedException, 
-                                    ExecutionException, Throwable  {
-		return putRow(transID, rowID, columns, columnToCheck, 
-			colValToCheck, 
-				true, asyncOperation);
-	}
-
-        public byte[] coProcAggr(long transID, int aggrType, 
-		byte[] startRowID, 
-              byte[] stopRowID, byte[] colFamily, byte[] colName, 
-              boolean cacheBlocks, int numCacheRows) 
-                          throws IOException, Throwable {
-
-		    Configuration customConf = table.getConfiguration();
-                    long rowCount = 0;
-
-                    if (transID > 0) {
-		      TransactionalAggregationClient aggregationClient = 
-                          new TransactionalAggregationClient(customConf);
-		      Scan scan = new Scan();
-		      scan.addFamily(colFamily);
-		      scan.setCacheBlocks(false);
-		      final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
-			new LongColumnInterpreter();
-		      byte[] tname = getTableName().getBytes();
-		      rowCount = aggregationClient.rowCount(transID, 
-                        org.apache.hadoop.hbase.TableName.valueOf(getTableName()),
-                        ci,
-                        scan);
-                    }
-                    else {
-		      AggregationClient aggregationClient = 
-                          new AggregationClient(customConf);
-		      Scan scan = new Scan();
-		      scan.addFamily(colFamily);
-		      scan.setCacheBlocks(false);
-		      final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
-			new LongColumnInterpreter();
-		      byte[] tname = getTableName().getBytes();
-		      rowCount = aggregationClient.rowCount( 
-                        org.apache.hadoop.hbase.TableName.valueOf(getTableName()),
-                        ci,
-                        scan);
-                    }
-
-		    coprocAggrResult = new ByteArrayList();
-
-		    byte[] rcBytes = 
-                      ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(rowCount).array();
-                    return rcBytes; 
-	}
-
-	public boolean flush() throws IOException {
-		if (table != null)
-			table.flushCommits();
-		return true;
-	}
-
-	public boolean release(boolean cleanJniObject) throws IOException {
-
-           boolean retcode = false;
-          // Complete the pending IO
-           if (future != null) {
-              try {
-                 future.get(30, TimeUnit.SECONDS);
-              } catch(TimeoutException | InterruptedException e) {
-		  logger.error("Asynchronous Thread is Cancelled, " + e);
-                  retcode = true;
-                  future.cancel(true); // Interrupt the thread
-              } catch (ExecutionException ee)
-              {
-              }
-              future = null;
-          }
-	  if (table != null)
-	    table.flushCommits();
-	  if (scanner != null) {
-	    scanner.close();
-	    scanner = null;
-	  }
-	  if (snapHelper !=null)
-	  {
-	    snapHelper.release();
-	    snapHelper = null;
-	  }
-	  cleanScan();		
-	  getResultSet = null;
-	  if (cleanJniObject) {
-	    if (jniObject != 0)
-	      cleanup(jniObject);
-            tableName = null;
-	  }
-          scanHelper = null;
-	  jniObject = 0;
-	  return retcode;
-	}
-
-	public boolean close(boolean clearRegionCache, boolean cleanJniObject) throws IOException {
-           if (logger.isTraceEnabled()) logger.trace("Enter close() " + tableName);
-           if (table != null) 
-           {
-              if (clearRegionCache)
-              {
-                 HConnection connection = table.getConnection();
-                 connection.clearRegionCache(tableName.getBytes());
-              }
-              table.close();
-              table = null;
-           }
-           return true;
-	}
-
-	public ByteArrayList getEndKeys() throws IOException {
-	    if (logger.isTraceEnabled()) logger.trace("Enter getEndKeys() " + tableName);
-            ByteArrayList result = new ByteArrayList();
-            if (table == null) {
-                return null;
-            }
-            byte[][] htableResult = table.getEndKeys();
-
-            // transfer the HTable result to ByteArrayList
-            for (int i=0; i<htableResult.length; i++ ) {
-                if (logger.isTraceEnabled()) logger.trace("Inside getEndKeys(), result[i]: " + 
-                             htableResult[i]);
-                if (logger.isTraceEnabled()) logger.trace("Inside getEndKeys(), result[i]: " + 
-                             new String(htableResult[i]));
-                result.add(htableResult[i]);
-            }
-
-            if (logger.isTraceEnabled()) logger.trace("Exit getEndKeys(), result size: " + result.getSize());
-            return result;
-	}
-
-    public ByteArrayList getStartKeys() throws IOException {
-        if (logger.isTraceEnabled()) logger.trace("Enter getStartKeys() " + tableName);
-        ByteArrayList result = new ByteArrayList();
-        if (table == null) {
-            return null;
-        }
-        byte[][] htableResult = table.getStartKeys();
-
-        // transfer the HTable result to ByteArrayList
-        for (int i=0; i<htableResult.length; i++ ) {
-            if (logger.isTraceEnabled()) logger.trace("Inside getStartKeys(), result[i]: " + 
-                         htableResult[i]);
-            if (logger.isTraceEnabled()) logger.trace("Inside getStartKeys(), result[i]: " + 
-                         new String(htableResult[i]));
-            result.add(htableResult[i]);
-        }
-
-        if (logger.isTraceEnabled()) logger.trace("Exit getStartKeys(), result size: " + result.getSize());
-        return result;
-    }
-
-    private void cleanScan()
-    {
-        if (fetchType == GET_ROW || fetchType == BATCH_GET)
-           return;
-        numRowsCached = 1;
-        numColsInScan = 0;
-        kvValLen = null;
-        kvValOffset = null;
-        kvQualLen = null;
-        kvQualOffset = null;
-        kvFamLen = null;
-        kvFamOffset = null;
-        kvTimestamp = null;
-        kvBuffer = null;
-        rowIDs = null;
-        kvsPerRow = null;
-    }
-
-    protected void setJniObject(long inJniObject) {
-       jniObject = inJniObject;
-    }    
-
-    private native int setResultInfo(long jniObject,
-				int[] kvValLen, int[] kvValOffset,
-				int[] kvQualLen, int[] kvQualOffset,
-				int[] kvFamLen, int[] kvFamOffset,
-  				long[] timestamp, 
-				byte[][] kvBuffer, byte[][] rowIDs,
-				int[] kvsPerRow, int numCellsReturned,
-				int rowsReturned);
-
-   private native void cleanup(long jniObject);
-
-   protected native int setJavaObject(long jniObject);
- 
-   static {
-     executorService = Executors.newCachedThreadPool();
-     System.loadLibrary("executor");
-   }
-}
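
For reference, putRow() and putRows() above decode a packed row buffer: a short giving the
column count, followed for each column by a short name length, the name bytes, an int value
length, and the value bytes. The helper below is a minimal sketch (not part of these sources)
of how such a buffer could be packed on the Java side; the "FAMILY:qualifier" column naming
and the default big-endian byte order are assumptions inferred from the getFamily()/getName()
calls and the plain ByteBuffer reads.

    import java.nio.ByteBuffer;

    public class RowBufferSketch {
        // Packs (name, value) pairs into the layout read by putRow()/putRows().
        // Column names are assumed to be "FAMILY:qualifier" byte strings.
        public static ByteBuffer packRow(byte[][] colNames, byte[][] colValues) {
            int size = 2;                                  // leading short: number of columns
            for (int i = 0; i < colNames.length; i++)
                size += 2 + colNames[i].length             // short name length + name bytes
                      + 4 + colValues[i].length;           // int value length + value bytes
            ByteBuffer bb = ByteBuffer.allocate(size);     // default (big-endian) order assumed
            bb.putShort((short) colNames.length);
            for (int i = 0; i < colNames.length; i++) {
                bb.putShort((short) colNames[i].length);
                bb.put(colNames[i]);
                bb.putInt(colValues[i].length);
                bb.put(colValues[i]);
            }
            bb.flip();                                     // rewind so the reader starts at position 0
            return bb;
        }
    }
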

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HiveClient.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/HiveClient.java b/core/sql/executor/HiveClient.java
deleted file mode 100755
index 5cedcc8..0000000
--- a/core/sql/executor/HiveClient.java
+++ /dev/null
@@ -1,301 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.ArrayList;
-import java.util.List;
-import java.lang.reflect.Field;
-
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import org.apache.hadoop.util.StringUtils;
-
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-// These are needed for the DDL_TIME constant. This class is different in Hive 0.10.
-// We use Java reflection instead of importing the class statically. 
-// For Hive 0.9 or lower
-// import org.apache.hadoop.hive.metastore.api.Constants;
-// For Hive 0.10 or higher
-// import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSDataOutputStream;
-
-import java.sql.SQLException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.Statement;
-import java.sql.DriverManager;
-
-
-public class HiveClient {
-    static Logger logger = Logger.getLogger(HiveClient.class.getName());
-    static String ddlTimeConst = null;
-    String lastError;
-    HiveConf hiveConf = null;
-    HiveMetaStoreClient hmsClient  ;
-    FSDataOutputStream fsOut = null;
-
-    public HiveClient() {
-   
-    }
-
-    public String getLastError() {
-        return lastError;
-    }
-
-    void setLastError(String err) {
-        lastError = err;
-    }
-
-    void setupLog4j() {
-        String confFile = System.getenv("MY_SQROOT")
-            + "/conf/log4j.hdfs.config";
-        PropertyConfigurator.configure(confFile);
-    }
-
-    public boolean init(String metastoreURI) 
-              throws MetaException {
-         setupLog4j();
-         if (logger.isDebugEnabled()) logger.debug("HiveClient.init(" + metastoreURI + " " + ") called.");
-         ddlTimeConst = getDDLTimeConstant();
-         hiveConf = new HiveConf();
-	 if (metastoreURI.length() > 0) {
-             hiveConf.set("hive.metastore.local", "false");
-             hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreURI);
-         }
-         hmsClient = new HiveMetaStoreClient(hiveConf, null);
-         return true;
-    }
-
-    public boolean close() {
-        hmsClient.close();
-        return true;
-    }
-
-    public boolean exists(String schName, String tblName)  
-        throws MetaException, TException, UnknownDBException {
-            if (logger.isDebugEnabled()) logger.debug("HiveClient.exists(" + schName + " , " + tblName + ") called.");
-            boolean result = hmsClient.tableExists(schName, tblName);
-            return result;
-    }
-
-    public String getHiveTableString(String schName, String tblName)
-        throws MetaException, TException {
-        Table table;
-        if (logger.isDebugEnabled()) logger.debug("HiveClient.getHiveTableString(" + schName + " , " + 
-                     tblName + ") called.");
-        try {
-            table = hmsClient.getTable(schName, tblName);
-        }
-        catch (NoSuchObjectException x) {
-            if (logger.isDebugEnabled()) logger.debug("HiveTable not found");
-            return new String("");
-        }
-        if (logger.isDebugEnabled()) logger.debug("HiveTable is " + table.toString());
-        return table.toString() ;
-    }
-
-    public long getRedefTime(String schName, String tblName)
-        throws MetaException, TException, ClassCastException, NullPointerException, NumberFormatException {
-        Table table;
-        if (logger.isDebugEnabled()) logger.debug("HiveClient.getRedefTime(" + schName + " , " + 
-                     tblName + ") called.");
-        try {
-            table = hmsClient.getTable(schName, tblName);
-            if (table == null) {
-                if (logger.isDebugEnabled()) logger.debug("getTable returned null for " + schName + "." + tblName + ".");
-                return 0;
-            }
-        }
-        catch (NoSuchObjectException x) {
-            if (logger.isDebugEnabled()) logger.debug("Hive table no longer exists.");
-            return 0;
-        }
-
-        long redefTime = table.getCreateTime();
-        if (table.getParameters() != null){
-            // these lookups would be used if the constants class were imported directly:
-            //String rfTime = table.getParameters().get(Constants.DDL_TIME);
-            //String rfTime = table.getParameters().get(hive_metastoreConstants.DDL_TIME);
-            // instead, the constant's name is determined via reflection (see getDDLTimeConstant)
-            String rfTime = table.getParameters().get(ddlTimeConst);
-            if (rfTime != null)
-                redefTime = Long.parseLong(rfTime);
-        }
-        if (logger.isDebugEnabled()) logger.debug("RedefTime is " + redefTime);
-        return redefTime ;
-    }
-
-    public Object[] getAllSchemas() throws MetaException {
-        List<String> schemaList = (hmsClient.getAllDatabases());
-        if (schemaList != null)
-           return schemaList.toArray();
-        else
-           return null; 
-    }
-
-    public Object[] getAllTables(String schName) 
-        throws MetaException {
-        List<String> tableList = hmsClient.getAllTables(schName);
-        if (tableList != null)
-           return tableList.toArray();
-        else
-           return null;
-    }
-
-    // Because Hive changed the name of the class containing internal constants
-    // in Hive 0.10, we are using Java Reflection to get the value of the DDL_TIME constant.
-    public static String getDDLTimeConstant()
-        throws MetaException {
-
-        Class constsClass = null;
-        Object constsFromReflection = null; 
-        Field ddlTimeField = null;
-        Object fieldVal = null;
-
-        // Using the class loader, try to load either class by name.
-        // Note that both classes have a default constructor and both have a static
-        // String field DDL_TIME, so the rest of the code is the same for both.
-        try { 
-            try {
-                constsClass = Class.forName(
-                   // Name in Hive 0.10 and higher
-                   "org.apache.hadoop.hive.metastore.api.hive_metastoreConstants");
-            } catch (ClassNotFoundException e) { 
-                // probably not found because we are using Hive 0.9 or lower
-                constsClass = null;
-            } 
-            if (constsClass == null) {
-                constsClass = Class.forName(
-                    // Name in Hive 0.9 and lower
-                    "org.apache.hadoop.hive.metastore.api.Constants");
-            }
-
-            // Make a new object for this class, using the default constructor
-            constsFromReflection = constsClass.newInstance(); 
-        } catch (InstantiationException e) { 
-            throw new MetaException("Instantiation error for metastore constants class");
-        } catch (IllegalAccessException e) { 
-            throw new MetaException("Illegal access exception");
-        } catch (ClassNotFoundException e) { 
-            throw new MetaException("Could not find Hive Metastore constants class");
-        } 
-
-        // Using Java reflection, get a reference to the DDL_TIME field
-        try {
-            ddlTimeField = constsClass.getField("DDL_TIME");
-        } catch (NoSuchFieldException e) {
-            throw new MetaException("Could not find DDL_TIME constant field");
-        }
-
-        // get the String object that represents the value of this field
-        try {
-            fieldVal = ddlTimeField.get(constsFromReflection);
-        } catch (IllegalAccessException e) {
-            throw new MetaException("Could not get value for DDL_TIME constant field");
-        }
-
-        return fieldVal.toString();
-    }
-
-  ///////////////////   
-  boolean hdfsCreateFile(String fname) throws IOException
-  {
-    HiveConf  config = new HiveConf();
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsCreateFile() - started" );
-    Path filePath = new Path(fname);
-    FileSystem fs = FileSystem.get(filePath.toUri(),config);
-    fsOut = fs.create(filePath, true);
-    
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsCreateFile() - file created" );
-
-    return true;
-  }
-  
-  boolean hdfsWrite(byte[] buff, long len) throws Exception
-  {
-
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() - started" );
-    try
-    {
-      fsOut.write(buff);
-      fsOut.flush();
-    }
-    catch (Exception e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() -- exception: " + e);
-      throw e;
-    }
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() - bytes written and flushed:" + len  );
-    
-    return true;
-  }
-  
-  boolean hdfsClose() throws IOException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsClose() - started" );
-    try
-    {
-      fsOut.close();
-    }
-    catch (IOException e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsClose() - exception:" + e);
-      throw e;
-    }
-    return true;
-  }
-  
-  public void executeHiveSQL(String ddl) throws ClassNotFoundException, SQLException
-  {
-      try
-      {
-          Class.forName("org.apache.hive.jdbc.HiveDriver");
-      }
- 
-      catch(ClassNotFoundException e) 
-      {
-          throw e;
-      }
-
-      try 
-      {
-          Connection con = DriverManager.getConnection("jdbc:hive2://", "hive", "");
-          Statement stmt = con.createStatement();
-          stmt.execute(ddl);
-      }
- 
-      catch(SQLException e)
-      {
-	  throw e;
-      }
-  }
-}
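
One note on executeHiveSQL() above: the Connection and Statement it opens against the embedded
HiveServer2 URL are never closed. A minimal sketch of the same call with try-with-resources,
assuming the same "jdbc:hive2://" embedded URL and HiveDriver class, would be:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class HiveDdlSketch {
        public static void runDdl(String ddl) throws ClassNotFoundException, SQLException {
            Class.forName("org.apache.hive.jdbc.HiveDriver");
            // try-with-resources closes the Statement and Connection even if execute() throws
            try (Connection con = DriverManager.getConnection("jdbc:hive2://", "hive", "");
                 Statement stmt = con.createStatement()) {
                stmt.execute(ddl);
            }
        }
    }
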

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/OrcFileReader.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/OrcFileReader.cpp b/core/sql/executor/OrcFileReader.cpp
index 25a431a..9dfafd5 100644
--- a/core/sql/executor/OrcFileReader.cpp
+++ b/core/sql/executor/OrcFileReader.cpp
@@ -69,7 +69,7 @@ OrcFileReader::~OrcFileReader()
 //////////////////////////////////////////////////////////////////////////////
 OFR_RetCode OrcFileReader::init()
 {
-  static char className[]="org/apache/hadoop/hive/ql/io/orc/OrcFileReader";
+  static char className[]="org/trafodion/sql/OrcFileReader";
   
   if (JavaMethods_)
     return (OFR_RetCode)JavaObjectInterface::init(className, javaClass_, JavaMethods_, (Int32)JM_LAST, TRUE);       
@@ -102,7 +102,7 @@ OFR_RetCode OrcFileReader::init()
 //    JavaMethods_[JM_FETCHROW2 ].jm_name      = "fetchNextRow";
 //    JavaMethods_[JM_FETCHROW2 ].jm_signature = "()[B";
     JavaMethods_[JM_FETCHROW2 ].jm_name      = "fetchNextRowObj";
-    JavaMethods_[JM_FETCHROW2 ].jm_signature = "()Lorg/apache/hadoop/hive/ql/io/orc/OrcFileReader$OrcRowReturnSQL;";
+    JavaMethods_[JM_FETCHROW2 ].jm_signature = "()Lorg/trafodion/sql/OrcFileReader$OrcRowReturnSQL;";
     JavaMethods_[JM_GETNUMROWS ].jm_name      = "getNumberOfRows";
     JavaMethods_[JM_GETNUMROWS ].jm_signature = "()J";
 //    JavaMethods_[JM_FETCHBUFF1].jm_name      = "fetchArrayOfRows";
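
The OrcFileReader.cpp hunks above only repoint the cached JNI descriptors at the repackaged
Java class. A JNI signature such as "()Lorg/trafodion/sql/OrcFileReader$OrcRowReturnSQL;"
denotes a no-argument method returning the nested class OrcRowReturnSQL, and "()J" a
no-argument method returning long. The Java-side shape those descriptors resolve against looks
roughly like the sketch below; the fields of OrcRowReturnSQL and the method bodies are
placeholders, not the actual implementation.

    package org.trafodion.sql;

    public class OrcFileReader {
        // Nested class referenced by the descriptor
        // "Lorg/trafodion/sql/OrcFileReader$OrcRowReturnSQL;" (fields illustrative only).
        public static class OrcRowReturnSQL {
            byte[] row;
            int    rowLength;
            long   rowNumber;
        }

        // Matches JM_FETCHROW2: name "fetchNextRowObj", signature "()L...$OrcRowReturnSQL;"
        public OrcRowReturnSQL fetchNextRowObj() {
            return null;   // placeholder body
        }

        // Matches JM_GETNUMROWS: name "getNumberOfRows", signature "()J"
        public long getNumberOfRows() {
            return 0L;     // placeholder body
        }
    }
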

