trafodion-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sure...@apache.org
Subject [1/6] incubator-trafodion git commit: [TRAFODION-25] First draft of DELETE costing changes
Date Thu, 08 Oct 2015 04:05:36 GMT
Repository: incubator-trafodion
Updated Branches:
  refs/heads/master 0618e6ec5 -> ba61c39ef


[TRAFODION-25] First draft of DELETE costing changes


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/8e0b3d44
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/8e0b3d44
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/8e0b3d44

Branch: refs/heads/master
Commit: 8e0b3d44efb149d033dfadcbed4f99639a0b53fe
Parents: 469408f
Author: Dave Birdsall <dbirdsall@apache.org>
Authored: Thu Oct 1 22:12:46 2015 +0000
Committer: Dave Birdsall <dbirdsall@apache.org>
Committed: Thu Oct 1 22:29:44 2015 +0000

----------------------------------------------------------------------
 core/sql/generator/GenPreCode.cpp         |  47 ++-
 core/sql/optimizer/BindRelExpr.cpp        |   8 +
 core/sql/optimizer/CostMethod.h           |  71 ++--
 core/sql/optimizer/OptPhysRelExpr.cpp     |  24 ++
 core/sql/optimizer/RelUpdate.h            |  10 +-
 core/sql/optimizer/ScmCostMethod.cpp      | 564 ++++++++++++++++++++++++-
 core/sql/optimizer/TransRule.cpp          |   9 +
 core/sql/regress/executor/EXPECTED015.SB  |   2 +-
 core/sql/sqlcomp/CmpSeabaseDDLcleanup.cpp |  25 ++
 core/sql/sqlcomp/DefaultConstants.h       |  10 +-
 core/sql/sqlcomp/nadefaults.cpp           |  12 +
 11 files changed, 714 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/generator/GenPreCode.cpp
----------------------------------------------------------------------
diff --git a/core/sql/generator/GenPreCode.cpp b/core/sql/generator/GenPreCode.cpp
index 62b59e1..8a02e4c 100644
--- a/core/sql/generator/GenPreCode.cpp
+++ b/core/sql/generator/GenPreCode.cpp
@@ -4368,14 +4368,6 @@ RelExpr * GenericUpdate::preCodeGen(Generator * generator,
 	  identityCol = valId.getNAColumn();
 	}
 
-      if ((getOperatorType() != REL_HBASE_UPDATE) &&
-	  (mergeInsertRecExpr().entries() > 0))
-	{
-	  *CmpCommon::diags() << DgSqlCode(-3241)
-			      << DgString0(" Non-unique ON clause not allowed with INSERT.");
-	  GenExit();
-	}
-
       if (((getOperatorType() == REL_HBASE_DELETE) ||
            (getOperatorType() == REL_HBASE_UPDATE)) &&
           (getTableDesc()->getNATable()->getClusteringIndex()->hasSyskey()))
@@ -4385,12 +4377,14 @@ RelExpr * GenericUpdate::preCodeGen(Generator * generator,
 	  GenExit();
 	}
 
-      if ((getOperatorType() == REL_HBASE_DELETE) &&
+      if ((getOperatorType() != REL_HBASE_UPDATE) &&
           (mergeInsertRecExpr().entries() > 0) &&
           (CmpCommon::getDefault(COMP_BOOL_175) == DF_OFF))
 	{
+          // MERGE with INSERT is limited to HBase updates unless
+          // the CQD is on
 	  *CmpCommon::diags() << DgSqlCode(-3241)
-			      << DgString0(" MERGE delete not allowed with INSERT.");
+			      << DgString0(" This MERGE is not allowed with INSERT.");
 	  GenExit();
 	}
 
@@ -10987,6 +10981,7 @@ NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr
* ie,
   NABoolean found = FALSE;
   removeFromOrigList = FALSE;
   NABoolean hbaseLookupPred = FALSE;
+  NABoolean flipOp = FALSE;  // set to TRUE when column is child(1)
 
   if (ie && 
       ((ie->getOperatorType() >= ITM_EQUAL) &&
@@ -11006,6 +11001,7 @@ NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr
* ie,
 	       (NOT hasColReference(ie->child(0))))
 	{
 	  found = TRUE;
+          flipOp = TRUE;
 	  colVID = ie->child(1)->getValueId();
 	  valueVID = ie->child(0)->getValueId();
 	}
@@ -11020,6 +11016,7 @@ NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr
* ie,
 	       (NOT hasColReference(ie->child(0))))
 	{
 	  found = TRUE;
+          flipOp = TRUE;
 	  colVID = ie->child(1)->getValueId();
 	  valueVID = ie->child(0)->getValueId();
 	}
@@ -11034,6 +11031,7 @@ NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr
* ie,
 	       (NOT hasColReference(ie->child(0))))
 	{
 	  found = TRUE;
+          flipOp = TRUE;
 	  colVID = ie->child(1)->getValueId();
 	  valueVID = ie->child(0)->getValueId();
 	}
@@ -11067,6 +11065,7 @@ NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr
* ie,
 	      newCV = newCV->preCodeGen(generator);
 	      
 	      found = TRUE;
+              flipOp = TRUE;
 	      colVID = newCV->getValueId();
 	      valueVID = ie->child(0)->getValueId();
 	    }
@@ -11180,13 +11179,33 @@ NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator,
ItemExpr * ie,
 	  else  if (ie->getOperatorType() == ITM_NOT_EQUAL)
 	    op = "NOT_EQUAL";
 	  else  if (ie->getOperatorType() == ITM_LESS)
-	    op = "LESS";
+            {
+            if (flipOp)
+              op = "GREATER";
+            else
+	      op = "LESS";
+            }
 	  else  if (ie->getOperatorType() == ITM_LESS_EQ)
-	    op = "LESS_OR_EQUAL";
+            {
+            if (flipOp)
+              op = "GREATER_OR_EQUAL";
+            else
+	      op = "LESS_OR_EQUAL";
+            }
 	  else  if (ie->getOperatorType() == ITM_GREATER)
-	    op = "GREATER";
+            {
+            if (flipOp)
+              op = "LESS";
+            else
+	      op = "GREATER";
+            }
 	  else  if (ie->getOperatorType() == ITM_GREATER_EQ)
-	    op = "GREATER_OR_EQUAL";
+            {
+            if (flipOp)
+              op = "LESS_OR_EQUAL";
+            else
+	      op = "GREATER_OR_EQUAL";
+            }
 	  else
 	    op = "NO_OP";
 	}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/optimizer/BindRelExpr.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/BindRelExpr.cpp b/core/sql/optimizer/BindRelExpr.cpp
index 5d3f586..6e7b2cc 100644
--- a/core/sql/optimizer/BindRelExpr.cpp
+++ b/core/sql/optimizer/BindRelExpr.cpp
@@ -11046,6 +11046,14 @@ RelExpr *MergeDelete::bindNode(BindWA *bindWA)
         bindWA->setErrStatus();
         return this;
       }
+      if (CmpCommon::getDefault(COMP_BOOL_175) == DF_OFF)
+      {
+        // MERGE DELETE + INSERT is buggy, so disallow it unless CQD is on. In
+        // particular, the optimizer sometimes fails to produce a plan in phase 1.
+	*CmpCommon::diags() << DgSqlCode(-3241)
+			    << DgString0(" MERGE DELETE not allowed with INSERT.");
+
+      }
 
       Tuple * tuple = new (bindWA->wHeap()) Tuple(insertValues());
       Insert * ins = new (bindWA->wHeap())

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/optimizer/CostMethod.h
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/CostMethod.h b/core/sql/optimizer/CostMethod.h
index 22f3d13..d7568a2 100644
--- a/core/sql/optimizer/CostMethod.h
+++ b/core/sql/optimizer/CostMethod.h
@@ -67,8 +67,9 @@ class CostMethodTuple;
 class CostMethodTranspose;
 class CostMethodStoredProc;
 class CostMethodHbaseInsert;
-class CostMethodDP2Update;
-class CostMethodDP2Delete;
+class CostMethodHbaseUpdateOrDelete;
+class CostMethodHbaseUpdate;
+class CostMethodHbaseDelete;
 class CostMethodUnPackRows;
 class CostMethodTableMappingUDF;
 class CostMethodFastExtract;
@@ -2036,30 +2037,31 @@ protected:
 //<pb>
 /**********************************************************************/
 /*                                                                    */
-/*                         CostMethodDP2UpdateOrDelete                */
+/*                         CostMethodHbaseUpdateOrDelete              */
 /*                                                                    */
 /**********************************************************************/
-class CostMethodDP2UpdateOrDelete : public CostMethod
+class CostMethodHbaseUpdateOrDelete : public CostMethod
 {
 public:
   // Constructor
-  CostMethodDP2UpdateOrDelete( const char* className )
+  CostMethodHbaseUpdateOrDelete( const char* className )
     : CostMethod( className ) 
   {};
 
 protected:
 
-  // Cost functions
+  // Old model cost function (obsolete; only here because base
+  // class requires an implementation)
   virtual Cost* computeOperatorCostInternal(RelExpr* op,
                                             const Context* myContext,
-                                            Lng32& countOfStreams) = 0;
+                                            Lng32& countOfStreams);
+
   // SCM Cost function
   virtual Cost* scmComputeOperatorCostInternal(RelExpr* op,
-					       const Context* myContext,
                                                const PlanWorkSpace* pws,
 					       Lng32& countOfStreams) = 0;
 
-  // Common functions to cost DP2 operations.
+  // Common functions to cost Hbase operations.
   NABoolean allKeyColumnsHaveHistogramStatistics(
     const IndexDescHistograms & histograms,
     const IndexDesc * CIDesc
@@ -2072,84 +2074,59 @@ protected:
     const IndexDesc * CIDesc
     ) const;
 
-  void computePartialScanCostForDP2CursorOperation(
-    CostScalar & seeksFR, CostScalar & transferFR, CostScalar & cpuFR,
-    CostScalar & seeksLR, CostScalar & transferLR, CostScalar & cpuLR,
+void computeIOCostsForCursorOperation(
+    CostScalar & randomIOs,        // out
+    CostScalar & sequentialIOs,    // out
     const IndexDesc * CIDesc,
     const CostScalar & numRowsToScan,
     NABoolean probesInOrder
     ) const;
 
-  void computePartialScanCostForDP2SubSetOperation(
-    CostScalar & seeksFR, CostScalar & transferFR, CostScalar & cpuFR,
-    CostScalar & seeksLR, CostScalar & transferLR, CostScalar & cpuLR,
-    const IndexDesc * CIDesc,
-    const CostScalar & numRowsToScan,
-    const CostScalar & numExecutorPreds
-    ) const;
-
-  inline CostScalar computeIdleCost( const CostScalar & activePartitions );
-
 protected:
 
-  // Used for MV logging pushdown
-  NABoolean usedForMvLogging_;
-  const PushDownProperty* pushDownProperty_;
 };
 //<pb>
 /**********************************************************************/
 /*                                                                    */
-/*                         CostMethodDP2Update                        */
+/*                         CostMethodHbaseUpdate                        */
 /*                                                                    */
 /**********************************************************************/
-class CostMethodDP2Update : public CostMethodDP2UpdateOrDelete
+class CostMethodHbaseUpdate : public CostMethodHbaseUpdateOrDelete
 {
 public:
   // Constructor
-  CostMethodDP2Update() 
-    : CostMethodDP2UpdateOrDelete( "CostMethodDP2Update" )  {}
+  CostMethodHbaseUpdate()
+    : CostMethodHbaseUpdateOrDelete( "CostMethodHbaseUpdate" )  {}
 
 protected:
-  // Cost functions
-  virtual Cost* computeOperatorCostInternal(RelExpr* op,
-                                            const Context* myContext,
-                                            Lng32& countOfStreams);
   // SCM Cost function
   virtual Cost* scmComputeOperatorCostInternal(RelExpr* op,
-					       const Context* myContext,
                                                const PlanWorkSpace* pws,
 					       Lng32& countOfStreams);
 
-  virtual void cacheParameters(RelExpr*, const Context *);
-  virtual void cleanUp();
 
-}; // class CostMethodDP2Update
+}; // class CostMethodHbaseUpdate
 
 //<pb>
 /**********************************************************************/
 /*                                                                    */
-/*                         CostMethodDP2Delete                        */
+/*                         CostMethodHbaseDelete                        */
 /*                                                                    */
 /**********************************************************************/
-class CostMethodDP2Delete : public CostMethodDP2UpdateOrDelete
+class CostMethodHbaseDelete : public CostMethodHbaseUpdateOrDelete
 {
 public:
   // Constructor
-  CostMethodDP2Delete() 
-    : CostMethodDP2UpdateOrDelete( "CostMethodDP2Delete" )  {}
+  CostMethodHbaseDelete()
+    : CostMethodHbaseUpdateOrDelete( "CostMethodHbaseDelete" )  {}
 
 protected:
-  // Cost functions
-  virtual Cost* computeOperatorCostInternal(RelExpr* op,
-                                            const Context* myContext,
-                                            Lng32& countOfStreams);
   // SCM Cost function
   virtual Cost* scmComputeOperatorCostInternal(RelExpr* op,
-					       const Context* myContext,
                                                const PlanWorkSpace* pws,
 					       Lng32& countOfStreams);
 
-}; // class CostMethodDP2Delete
+}; // class CostMethodHbaseDelete
 
 
 /**********************************************************************/

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/optimizer/OptPhysRelExpr.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/OptPhysRelExpr.cpp b/core/sql/optimizer/OptPhysRelExpr.cpp
index e291fc0..93b2e03 100644
--- a/core/sql/optimizer/OptPhysRelExpr.cpp
+++ b/core/sql/optimizer/OptPhysRelExpr.cpp
@@ -16546,6 +16546,18 @@ RelInternalSP::costMethod() const
 } // RelInternalSP::costMethod()
 //<pb>
 
+CostMethod *
+HbaseDelete::costMethod() const
+{
+  if (CmpCommon::getDefault(HBASE_DELETE_COSTING) == DF_OFF)
+    return RelExpr::costMethod();  // returns cost 1 cost object
+
+  static THREAD_P CostMethodHbaseDelete *m = NULL;
+  if (m == NULL)
+    m = new (GetCliGlobals()->exCollHeap()) CostMethodHbaseDelete();
+  return m;
+} // HbaseDelete::costMethod()
+
 PhysicalProperty*
 HbaseDelete::synthPhysicalProperty(const Context* myContext,
                                    const Lng32     planNumber,
@@ -16580,6 +16592,18 @@ HbaseDelete::synthPhysicalProperty(const Context* myContext,
 
 } // HbaseDelete::synthPhysicalProperty()
 
+CostMethod *
+HbaseUpdate::costMethod() const
+{
+  if (CmpCommon::getDefault(HBASE_UPDATE_COSTING) == DF_OFF)
+    return RelExpr::costMethod();  // returns cost 1 cost object
+
+  static THREAD_P CostMethodHbaseUpdate *m = NULL;
+  if (m == NULL)
+    m = new (GetCliGlobals()->exCollHeap()) CostMethodHbaseUpdate();
+  return m;
+} // HbaseUpdate::costMethod()
+
 PhysicalProperty*
 HbaseUpdate::synthPhysicalProperty(const Context* myContext,
                                    const Lng32     planNumber,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/optimizer/RelUpdate.h
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/RelUpdate.h b/core/sql/optimizer/RelUpdate.h
index 5f5f982..0e357e9 100644
--- a/core/sql/optimizer/RelUpdate.h
+++ b/core/sql/optimizer/RelUpdate.h
@@ -540,6 +540,8 @@ GenericUpdate(const CorrName &name,
   inline void setPrecondition(const ValueIdSet pc)
            { precondition_ = pc; exprsInDerivedClasses_ += precondition_; }
 
+  virtual ItemExpr * insertValues() { return NULL;}
+
 protected:
 
   // Here, derived classes can register expressions that are used by
@@ -1438,7 +1440,7 @@ public:
   virtual const NAString getText() const;
 
   ItemExpr * insertCols() {return insertCols_;}
-  ItemExpr * insertValues() { return insertValues_;}
+  virtual ItemExpr * insertValues() { return insertValues_;}
   NABoolean xformedUpsert() {return xformedUpsert_;}
   void setXformedUpsert() {xformedUpsert_ = TRUE;}
 private:
@@ -1589,7 +1591,7 @@ public:
   virtual const NAString getText() const;
 
   ItemExpr * insertCols() {return insertCols_;}
-  ItemExpr * insertValues() { return insertValues_;}
+  virtual ItemExpr * insertValues() { return insertValues_;}
 private:
   ItemExpr *insertCols_;
   ItemExpr *insertValues_;
@@ -1699,6 +1701,8 @@ public:
                                                   const Lng32     planNumber,
                                                   PlanWorkSpace  *pws);
 
+  virtual CostMethod * costMethod() const;
+
   //! getText method
   //  used to display the name of the node.
   virtual const NAString getText() const;
@@ -2005,6 +2009,8 @@ public:
                                                   const Lng32     planNumber,
                                                   PlanWorkSpace  *pws);
 
+  virtual CostMethod * costMethod() const;
+
   //! getText method
   //  used to display the name of the node.
   virtual const NAString getText() const;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/optimizer/ScmCostMethod.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/ScmCostMethod.cpp b/core/sql/optimizer/ScmCostMethod.cpp
index 85e1aa9..c5d19d2 100644
--- a/core/sql/optimizer/ScmCostMethod.cpp
+++ b/core/sql/optimizer/ScmCostMethod.cpp
@@ -3632,6 +3632,561 @@ CostMethodMergeUnion::scmComputeOperatorCostInternal(RelExpr *op,
 	   
 //<pb>
 
+// -------------------------------------------------------------------
+// Cost methods for write DML operations
+// -------------------------------------------------------------------
+
+
+// -------------------------------------------------------------------
+// This method is a stub for obsolete old cost model
+// -------------------------------------------------------------------
+Cost*
+CostMethodHbaseUpdateOrDelete::computeOperatorCostInternal(RelExpr* op,
+  const Context * context,
+  Lng32& countOfStreams)
+{
+  CMPASSERT(false);  // should never be called
+  return NULL;
+}
+
+// -----------------------------------------------------------------------
+// CostMethodHbaseUpdateOrDelete::allKeyColumnsHaveHistogramStatistics()
+//
+// Returns TRUE if all key columns have histograms, FALSE if not.
+// -----------------------------------------------------------------------
+NABoolean CostMethodHbaseUpdateOrDelete::allKeyColumnsHaveHistogramStatistics(
+  const IndexDescHistograms & histograms,
+  const IndexDesc * CIDesc
+  ) const
+{
+  // Check if all key columns have histogram statistics
+  NABoolean statsForAllKeyCols = TRUE;
+  for ( CollIndex j = 0; j < CIDesc->getIndexKey().entries(); j++ )
+  {
+    if (histograms.isEmpty())
+    {
+      statsForAllKeyCols = FALSE;
+      break;
+    }
+    else if (!histograms.getColStatsPtrForColumn((CIDesc->getIndexKey()) [j]))
+    {
+      // If we get a null pointer when we try to retrieve a
+      // ColStats for a column of this table, then no histogram
+      // data was created for that column.
+      statsForAllKeyCols = FALSE;
+      break;
+    }
+  }
+
+  return statsForAllKeyCols;
+}   // CostMethodHbaseUpdateOrDelete::allKeyColumnsHaveHistogramStatistics()
+
+// -----------------------------------------------------------------------
+// CostMethodHbaseUpdateOrDelete::numRowsToScanWhenAllKeyColumnsHaveHistograms()
+//
+// Returns an estimate of the number of rows that will be scanned as a
+// result of applying key predicates. Assumes that histograms exist for
+// all key columns.
+// -----------------------------------------------------------------------
+#pragma nowarn(262)   // warning elimination
+CostScalar
+CostMethodHbaseUpdateOrDelete::numRowsToScanWhenAllKeyColumnsHaveHistograms(
+  IndexDescHistograms & histograms,
+  const ColumnOrderList & keyPredsByCol,
+  const CostScalar & activePartitions,
+  const IndexDesc * CIDesc
+  ) const
+{
+
+  // Determine if there are single subset predicates:
+  CollIndex singleSubsetPrefixOrder;
+  NABoolean itIsSingleSubset =
+    keyPredsByCol.getSingleSubsetOrder( singleSubsetPrefixOrder );
+
+  NABoolean thereAreSingleSubsetPreds = FALSE;
+  if ( singleSubsetPrefixOrder > 0 )
+  {
+    thereAreSingleSubsetPreds = TRUE;
+  }
+  else
+  {
+    //  singleSubsetPrefixOrder==0  means either there
+    // is an equal, an IN,  or there are no key preds in the
+    // first column.
+    // singleSubsetPrefixOrder==0 AND itIsSingleSubset
+    // means there is an EQUAL or there are no key preds
+    // in the first column, check for existence of
+    // predicates in this case:
+    if (     itIsSingleSubset // this is FALSE for an IN predicate
+	 AND keyPredsByCol[0] != NULL
+       )
+    {
+      thereAreSingleSubsetPreds = TRUE;
+    }
+  }
+
+
+  CMPASSERT(NOT histograms.isEmpty());
+
+  if ( thereAreSingleSubsetPreds )
+  {
+    // ---------------------------------------------------------
+    // There are some key predicates, so apply them
+    // to the histograms and get the total rows:
+    // ---------------------------------------------------------
+
+    // Get all the key preds for the key columns up to the first
+    // key column with no key preds
+    ValueIdSet singleSubsetPrefixPredicates;
+    NABoolean conflict = FALSE;
+    for ( CollIndex i = 0; i <= singleSubsetPrefixOrder; i++ )
+    {
+
+      const ValueIdSet *predsPtr = keyPredsByCol[i];
+      CMPASSERT( predsPtr != NULL ); // it must contain preds
+      singleSubsetPrefixPredicates.insert( *predsPtr );
+
+    } // for every key col in the sing. subset prefix
+
+    // Apply those key predicates that reference key columns
+    // before the first missing key to the histograms:
+    const SelectivityHint * selHint = CIDesc->getPrimaryTableDesc()->getSelectivityHint();
+    const CardinalityHint * cardHint = CIDesc->getPrimaryTableDesc()->getCardinalityHint();
+
+    RelExpr * dummyExpr = new (STMTHEAP) RelExpr(ITM_FIRST_ITEM_OP,
+				                 NULL,
+				                 NULL,
+				                 STMTHEAP);
+
+    histograms.applyPredicates( singleSubsetPrefixPredicates, *dummyExpr, selHint, cardHint);
+
+  } // if there are key predicates
+
+  // If there is no key predicates, a full table scan will be generated.
+  // Otherwise, key predicates will be applied to the histograms.
+  // Now, compute the number of rows after key preds are applied,
+  // and accounting for asynchronous parallelism:
+  CostScalar numRowsToScan =
+    ((histograms.getRowCount()/activePartitions).getCeiling()).minCsOne();
+
+  return numRowsToScan;
+}   // CostMethodHbaseUpdateOrDelete::numRowsToScanWhenAllKeyColumnsHaveHistograms()
+#pragma warn(262)  // warning elimination
+
+// -----------------------------------------------------------------------
+// CostMethodHbaseUpdateOrDelete::computeIOCostsForCursorOperation().
+// -----------------------------------------------------------------------
+void CostMethodHbaseUpdateOrDelete::computeIOCostsForCursorOperation(
+  CostScalar & randomIOs,        // out
+  CostScalar & sequentialIOs,    // out
+  const IndexDesc * CIDesc,
+  const CostScalar & numRowsToScan,
+  NABoolean probesInOrder
+  ) const
+{
+  const CostScalar indexLevels = CIDesc->getIndexLevels();
+  const CostScalar & kbPerBlock = CIDesc->getBlockSizeInKb();
+  const CostScalar rowsPerBlock =
+    ((kbPerBlock * csOneKiloBytes) /
+     CIDesc->getNAFileSet()->getRecordLength()).getFloor();
+  CostScalar totalIndexBlocks(csZero);
+
+  if (probesInOrder)
+  {
+    // If the probes are in order, assume that each successive
+    // probe refers to the next record in the table, i.e. the
+    // probes are "highly inclusive", or in other words, there
+    // are no gaps in the records to be updated. So, assuming
+    // this, the number of blocks that need to be read is
+    // the # of probes divided by the rows per block. This is
+    // guaranteed to be correct if we are updating most of the
+    // rows, we can only go wrong if we are updating a small
+    // number of dispersed rows. This seems unlikely, and anyway
+    // even if it's true we won't be that far off. If the rows
+    // are highly inclusive, we could also assume that since the
+    // blocks will be contiguous that there will only be one seek.
+    // But, we'd be way off in the case where the assumption
+    // doesn't hold so we won't do it for now. What we need is
+    // some way to determine the "inclusiveness factor".
+    sequentialIOs = (numRowsToScan / rowsPerBlock).getCeiling();
+    // The # of index blocks to read is based on the number of data
+    // blocks that must be read
+    totalIndexBlocks =
+      CIDesc->getEstimatedIndexBlocksLowerBound(sequentialIOs);
+    randomIOs = totalIndexBlocks;
+  }
+  else  // probes not in order
+  {
+    // Assume all IOs are random. This is a bit pessimistic
+    // because at some point much of the file will be in cache,
+    // so one could argue that as the number of rows updated
+    // or deleted grows large the number of random IOs should
+    // decrease. We'll leave that to future work.
+    sequentialIOs = csZero;
+    totalIndexBlocks =
+      CIDesc->getEstimatedIndexBlocksLowerBound(numRowsToScan);
+    randomIOs = numRowsToScan + totalIndexBlocks;
+  }
+
+} // CostMethodHbaseUpdateOrDelete::computeIOCostsForCursorOperation()
+
+
+
+// ----QUICKSEARCH FOR HbaseUpdate........................................
+
+/**********************************************************************/
+/*                                                                    */
+/*                      CostMethodHbaseUpdate                         */
+/*                                                                    */
+/**********************************************************************/
+
+//*******************************************************************
+// This method computes the cost vector of the HbaseUpdate operation
+//*******************************************************************
+Cost*
+CostMethodHbaseUpdate::scmComputeOperatorCostInternal(RelExpr* op,
+  const PlanWorkSpace* pws,
+  Lng32& countOfStreams)
+{
+  // TODO: Write this method; the line below is a stub
+  return CostMethod::scmComputeOperatorCostInternal(op,pws,countOfStreams);
+}
+
+// ----QUICKSEARCH FOR HbaseDelete........................................
+
+/**********************************************************************/
+/*                                                                    */
+/*                      CostMethodHbaseDelete                         */
+/*                                                                    */
+/**********************************************************************/
+
+//*******************************************************************
+// This method computes the cost vector of the HbaseDelete operation
+//*******************************************************************
+Cost*
+CostMethodHbaseDelete::scmComputeOperatorCostInternal(RelExpr* op,
+  const PlanWorkSpace* pws,
+  Lng32& countOfStreams)
+{
+  const Context * myContext = pws->getContext();
+
+  cacheParameters(op,myContext);
+  estimateDegreeOfParallelism();
+
+  const InputPhysicalProperty* ippForMe =
+    myContext->getInputPhysicalProperty();
+
+  // -----------------------------------------
+  // Save off estimated degree of parallelism.
+  // -----------------------------------------
+  countOfStreams = countOfStreams_;
+
+  HbaseDelete* delOp = (HbaseDelete *)op;   // downcast
+
+  CMPASSERT(partFunc_ != NULL);
+  CostScalar activePartitions =
+   (CostScalar)
+     (((NodeMap *)(partFunc_->getNodeMap()))->getNumActivePartitions());
+  const IndexDesc* CIDesc = delOp->getIndexDesc();
+  const CostScalar & recordSizeInKb = CIDesc->getRecordSizeInKb();
+
+  CostScalar tuplesProcessed(csZero);
+  CostScalar tuplesProduced(csZero);
+  CostScalar tuplesSent(csZero);  // we use tuplesSent to model sending rowIDs to Hbase 
+  CostScalar randomIOs(csZero);
+  CostScalar sequentialIOs(csZero);
+
+  CostScalar countOfAsynchronousStreams = activePartitions;
+
+  // figure out if the probes are in order - if they are, then when
+  // scanning, I/O will tend to be sequential
+
+  NABoolean probesInOrder = FALSE;
+  if (ippForMe != NULL)  // input physical properties exist?
+  {
+    // See if the probes are in order.
+
+    // For delete, a partial order is ok.
+    NABoolean partiallyInOrderOK = TRUE;
+    NABoolean probesForceSynchronousAccess = FALSE;
+    ValueIdList targetSortKey = CIDesc->getOrderOfKeyValues();
+    ValueIdSet sourceCharInputs =
+      delOp->getGroupAttr()->getCharacteristicInputs();
+
+    ValueIdSet targetCharInputs;
+    // The char inputs are still in terms of the source. Map them to the target.
+    // Note: The source char outputs in the ipp have already been mapped to
+    // the target. CharOutputs are a set, meaning they do not have duplicates
+    // But we could have cases where two columns of the target are matched to the
+    // same source column, example: Sol: 10-040416-5166, where we have
+    // INSERT INTO b6table1
+    //		  ( SELECT f, h_to_f, f, 8.4
+    //            FROM btre211
+    //            );
+    // Hence we use lists here instead of sets.
+    // Check to see if there are any duplicates in the source Characteristics inputs
+    // if no, we shall perform set operations, as these are faster
+    ValueIdList bottomValues = delOp->updateToSelectMap().getBottomValues();
+    ValueIdSet bottomValuesSet(bottomValues);
+    NABoolean useListInsteadOfSet = FALSE;
+
+    CascadesGroup* group1 = (*CURRSTMT_OPTGLOBALS->memo)[delOp->getGroupId()];
+
+    GenericUpdate* upOperator = (GenericUpdate *) group1->getFirstLogExpr();
+
+    if (((upOperator->getTableName().getSpecialType() == ExtendedQualName::NORMAL_TABLE
) || (upOperator->getTableName().getSpecialType() == ExtendedQualName::GHOST_TABLE )) &&
+     (bottomValuesSet.entries() != bottomValues.entries() ) )
+    {
+
+      ValueIdList targetInputList;
+      // from here get all the bottom values that appear in the sourceCharInputs
+      bottomValues.findCommonElements(sourceCharInputs );
+      bottomValuesSet = bottomValues;
+
+      // we can use the bottomValues only if these contain some duplicate columns of
+      // characteristics inputs, otherwise we shall use the characteristics inputs.
+      if (bottomValuesSet == sourceCharInputs)
+      {
+        useListInsteadOfSet = TRUE;
+	delOp->updateToSelectMap().rewriteValueIdListUpWithIndex(
+	  targetInputList,
+	  bottomValues);
+	targetCharInputs = targetInputList;
+      }
+    }
+
+    if (!useListInsteadOfSet)
+    {
+      delOp->updateToSelectMap().rewriteValueIdSetUp(
+	targetCharInputs,
+	sourceCharInputs);
+    }
+
+    // If a target key column is covered by a constant on the source side,
+    // then we need to remove that column from the target sort key
+    removeConstantsFromTargetSortKey(&targetSortKey,
+                                   &(delOp->updateToSelectMap()));
+    NABoolean orderedNJ = TRUE;
+    // Don't call ordersMatch if njOuterOrder_ is null.
+    if (ippForMe->getAssumeSortedForCosting())
+      orderedNJ = FALSE;
+    else
+      // if leading keys are not same then don't try ordered NJ.
+      orderedNJ =
+        isOrderedNJFeasible(*(ippForMe->getNjOuterOrder()), targetSortKey);
+
+    if (orderedNJ AND 
+        ordersMatch(ippForMe,
+                    CIDesc,
+                    &targetSortKey,
+                    targetCharInputs,
+                    partiallyInOrderOK,
+                    probesForceSynchronousAccess))
+    {
+      probesInOrder = TRUE;
+      if (probesForceSynchronousAccess)
+      {
+        // The probes form a complete order across all partitions and
+        // the clustering key and partitioning key are the same. So, the
+        // only asynchronous I/O we will see will be due to ESPs. So,
+        // limit the count of streams in DP2 by the count of streams in ESP.
+
+        // Get the logPhysPartitioningFunction, which we will use
+        // to get the logical partitioning function. If it's NULL,
+        // it means the table was not partitioned at all, so we don't
+        // need to limit anything since there already is no asynch I/O.
+
+     // TODO: lppf is always null in Trafodion; figure out what to do instead...
+        const LogPhysPartitioningFunction* lppf =
+            partFunc_->castToLogPhysPartitioningFunction();
+        if (lppf != NULL)
+        {
+          PartitioningFunction* logPartFunc =
+            lppf->getLogPartitioningFunction();
+          // Get the number of ESPs:
+          CostScalar numParts = logPartFunc->getCountOfPartitions();
+
+          countOfAsynchronousStreams = MINOF(numParts,
+                                             countOfAsynchronousStreams);
+        } // lppf != NULL
+      } // probesForceSynchronousAccess
+    } // probes are in order
+  } // if input physical properties exist
+
+  CostScalar currentCpus = 
+    (CostScalar)myContext->getPlan()->getPhysicalProperty()->getCurrentCountOfCPUs();
+  CostScalar activeCpus = MINOF(countOfAsynchronousStreams, currentCpus);
+  CostScalar streamsPerCpu =
+    (countOfAsynchronousStreams / activeCpus).getCeiling();
+
+
+  CostScalar noOfProbesPerPartition(csOne);
+
+  CostScalar numRowsToDelete(csOne);
+  CostScalar numRowsToScan(csOne);
+
+  CostScalar commonComputation;
+
+  // Determine # of rows to scan and to delete
+
+  if (delOp->getSearchKey() && delOp->getSearchKey()->isUnique() &&
+    (noOfProbes_ == 1))
+  {
+    // unique access
+
+    activePartitions = csOne;
+    countOfAsynchronousStreams = csOne;
+    activeCpus = csOne;
+    streamsPerCpu = csOne;
+    numRowsToScan = csOne;
+    // assume the 1 row always satisfies any executor predicates so
+    // we'll always do the Delete
+    numRowsToDelete = csOne;
+  }
+  else
+  {
+    // non-unique access
+
+    numRowsToDelete =
+      ((myRowCount_ / activePartitions).getCeiling()).minCsOne();
+    noOfProbesPerPartition =
+      ((noOfProbes_ / countOfAsynchronousStreams).getCeiling()).minCsOne();
+
+    // need to compute the number of rows that satisfy the key predicates
+    // to compute the I/Os that must be performed
+
+    // need to create a new histogram, since the one from input logical
+    // prop. has the histogram for the table after all executor preds are
+    // applied (i.e. the result cardinality)
+    IndexDescHistograms histograms(*CIDesc,CIDesc->getIndexKey().entries());
+
+    // retrieve all of the key preds in key column order
+    ColumnOrderList keyPredsByCol(CIDesc->getIndexKey());
+    delOp->getSearchKey()->getKeyPredicatesByColumn(keyPredsByCol);
+
+    if ( NOT allKeyColumnsHaveHistogramStatistics( histograms, CIDesc ) )
+    {
+      // All key columns do not have histogram data, the best we can
+      // do is use the number of rows that satisfy all predicates
+      // (i.e. the number of rows we will be updating)
+      numRowsToScan = numRowsToDelete;
+    }
+    else
+    {
+      numRowsToScan = numRowsToScanWhenAllKeyColumnsHaveHistograms(
+	histograms,
+	keyPredsByCol,
+	activePartitions,
+	CIDesc
+	);
+    }
+  }
+
+  // Notes: At execution time, several different TCBs can be created
+  // for a delete. We can class them three ways: Unique, Subset, and
+  // Rowset. Representative examples of the three classes are:
+  //
+  //   ExHbaseUMDtrafUniqueTaskTcb
+  //   ExHbaseUMDtrafSubsetTaskTcb
+  //   ExHbaseAccessSQRowsetTcb
+  //
+  // The theory of operation of each of these differs somewhat. 
+  //
+  // For the Unique variant, we use an HBase "get" to obtain a row, apply
+  // a predicate to it, then do an HBase "delete" to delete it if the
+  // predicate is true. (If there is no predicate, we'll simply do a
+  // "checkAndDelete" so there would be no "get" cost.) 
+  //
+  // For the Subset variant, we use an HBase "scan" to obtain a sequence
+  // of rows, apply a predicate to each, then do an HBase "delete" on
+  // each row that passes the predicate.
+  //
+  // For the Rowset variant, we simply pass all the input keys to 
+  // HBase in batches in HBase "deleteRows" calls. (In Explain plans,
+  // this TCB shows up as "trafodion_delete_vsbb", while the first two
+  // show up as "trafodion_delete".) There is no "get" cost. In plans
+  // with this TCB, there is a separate Scan TCB to obtain the keys,
+  // which then flow to this Rowset TCB via a tuple flow or nested join.
+  // (Such a separate Scan might exist with the first two TCBs also,
+  // e.g., when an index is used to decide which rows to delete.)
+  // The messaging cost to HBase is also reduced since multiple delete
+  // keys are sent per HBase interaction.
+  //
+  // Unfortunately the decisions as to which TCB will be used are
+  // currently made in the generator code and so aren't easily 
+  // available to us here. For the moment then, we make no attempt 
+  // to distinguish a separate "get" cost, nor do we take into account
+  // possible reduced message cost in the Rowset case. Should this
+  // choice be refactored in the future to push it into the Optimizer,
+  // then we can do a better job here. We did attempt to distinguish
+  // the unique case here from the others, but even there our criteria
+  // are not quite the same as in the generator. So at best, this attempt
+  // simply sharpens the cost estimate in this one particular case.
+
+
+  // Compute the I/O cost
+
+  computeIOCostsForCursorOperation(
+    randomIOs /* out */,
+    sequentialIOs /* out */,
+    CIDesc,
+    numRowsToScan,
+    probesInOrder
+    );
+
+  // Compute the tuple cost
+
+  tuplesProduced = numRowsToDelete;
+  tuplesProcessed = numRowsToScan; 
+  tuplesSent = numRowsToDelete;
+
+  CostScalar rowSize = delOp->getIndexDesc()->getRecordLength();
+  CostScalar rowSizeFactor = scmRowSizeFactor(rowSize); 
+  CostScalar outputRowSize = delOp->getGroupAttr()->getRecordLength();
+  CostScalar outputRowSizeFactor = scmRowSizeFactor(outputRowSize);
+
+  tuplesProcessed = tuplesProcessed * rowSizeFactor;
+  tuplesSent = tuplesSent * rowSizeFactor;
+  tuplesProduced = tuplesProduced * outputRowSizeFactor;
+
+
+  // ---------------------------------------------------------------------
+  // Synthesize and return cost object.
+  // ---------------------------------------------------------------------
+
+  CostScalar probeRowSize = delOp->getIndexDesc()->getKeyLength();
+  Cost * deleteCost = 
+    scmCost(tuplesProcessed, tuplesProduced, tuplesSent, randomIOs, sequentialIOs, noOfProbes_,
+	    rowSize, csZero, outputRowSize, probeRowSize);
+
+#ifndef NDEBUG
+if ( CmpCommon::getDefault( OPTIMIZER_PRINT_COST ) == DF_ON )
+    {
+      pfp = stdout;
+      fprintf(pfp, "HbaseDelete::scmComputeOperatorCostInternal()\n");
+      deleteCost->getScmCplr().print(pfp);
+      fprintf(pfp, "HBase Delete elapsed time: ");
+      fprintf(pfp,"%f", deleteCost->
+              convertToElapsedTime(
+                   myContext->getReqdPhysicalProperty()).
+              value());
+      fprintf(pfp,"\n");
+      fprintf(pfp,"CountOfStreams returned %d\n",countOfStreams);
+    }
+#endif
+
+  return deleteCost;
+
+}  // CostMethodHbaseDelete::scmComputeOperatorCostInternal()
+
+// ----QUICKSEARCH FOR HbaseInsert ........................................
+
+/**********************************************************************/
+/*                                                                    */
+/*                      CostMethodHbaseInsert                         */
+/*                                                                    */
+/**********************************************************************/
+
 //**************************************************************
 // This method computes the cost vector of the HbaseInsert operation
 //**************************************************************
@@ -3686,11 +4241,12 @@ CostMethodHbaseInsert::scmComputeOperatorCostInternal(RelExpr* op,
   if (printCost)
     {
       pfp = stdout;
-      fprintf(pfp, "DP2INSERT::scmComputeOperatorCostInternal()\n");
+      fprintf(pfp, "HbaseInsert::scmComputeOperatorCostInternal()\n");
       hbaseInsertCost->getScmCplr().print(pfp);
-      fprintf(pfp, "DP2Insert elapsed time: ");
+      fprintf(pfp, "Hbase Insert elapsed time: ");
       fprintf(pfp, "%f", hbaseInsertCost->convertToElapsedTime(myContext->getReqdPhysicalProperty()).value());
       fprintf(pfp, "\n");
+      fprintf(pfp,"CountOfStreams returned %d\n",countOfStreams);
     }
 #endif
 
@@ -3702,6 +4258,10 @@ CostMethodHbaseInsert::scmComputeOperatorCostInternal(RelExpr* op,
   return hbaseInsertCost;
 }
 
+/**********************************************************************/
+// End cost methods for WRITE DML operations
+/**********************************************************************/
+
 //<pb>
 
 //**************************************************************

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/optimizer/TransRule.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/TransRule.cpp b/core/sql/optimizer/TransRule.cpp
index 4d74bd6..356e44a 100644
--- a/core/sql/optimizer/TransRule.cpp
+++ b/core/sql/optimizer/TransRule.cpp
@@ -3287,6 +3287,15 @@ NABoolean TSJRule::topMatch (RelExpr * expr,
       ((updateExpr->getOperatorType() == REL_UNARY_DELETE) && updateExpr->isMtsStatement()))
     return FALSE;
 
+  // It is not semantically correct to convert a MERGE having a 
+  // "NOT MATCHED" action to a TSJ, since the former has right 
+  // join semantics. (If we converted here to a TSJ, a non-matching
+  // row would not be returned by the outer child scan node, so
+  // the inner child merge node would never see it and hence the
+  // "NOT MATCHED" logic would not be activated.)
+  if (updateExpr->isMerge() && updateExpr->insertValues())
+    return FALSE;
+
   return TRUE;
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/regress/executor/EXPECTED015.SB
----------------------------------------------------------------------
diff --git a/core/sql/regress/executor/EXPECTED015.SB b/core/sql/regress/executor/EXPECTED015.SB
index d50a603..02efa0c 100755
--- a/core/sql/regress/executor/EXPECTED015.SB
+++ b/core/sql/regress/executor/EXPECTED015.SB
@@ -1113,7 +1113,7 @@ update t015t3 set (b,c) = 10,20;
 +>  when matched then delete
 +>  when not matched then insert values (1,2,3);
 
-*** ERROR[3241] This MERGE statement is not supported. Reason:  Non-unique ON clause not
allowed with INSERT.
+*** ERROR[3241] This MERGE statement is not supported. Reason:  MERGE DELETE not allowed
with INSERT.
 
 *** ERROR[8822] The statement was not prepared.
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/sqlcomp/CmpSeabaseDDLcleanup.cpp
----------------------------------------------------------------------
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLcleanup.cpp b/core/sql/sqlcomp/CmpSeabaseDDLcleanup.cpp
index 9bf3ee0..28cd8d7 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLcleanup.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLcleanup.cpp
@@ -486,6 +486,22 @@ short CmpSeabaseMDcleanup::deleteMDentries(ExeCliInterface *cliInterface)
   NABoolean errorSeen = FALSE;
 
   // OBJECTS table
+
+  // Must first hide the index to OBJECTS, because the delete plan would otherwise
+  // likely access OBJECTS_UNIQ_IDX first then join that to OBJECTS (as OBJECT_UID
+  // is the leading part of the index key). If the index row were missing, we'd 
+  // fail to delete the base table row. Right now OBJECTS is the only metadata
+  // table with an index, so this is the only place we need to take this precaution.
+
+  cliRC = cliInterface->holdAndSetCQD("HIDE_INDEXES","ALL",CmpCommon::diags());
+  if (cliRC < 0)
+    {
+      if (processCleanupErrors(cliInterface, errorSeen))
+        return -1;
+    } 
+
+  // Now delete from OBJECTS (but not its index)
+
   str_sprintf(query, "delete from %s.\"%s\".%s where object_uid = %Ld",
               getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
               objUID_);
@@ -495,6 +511,15 @@ short CmpSeabaseMDcleanup::deleteMDentries(ExeCliInterface *cliInterface)
       if (processCleanupErrors(cliInterface, errorSeen))
         return -1;
     }
+ 
+  // Restore previous setting of CQD HIDE_INDEXES
+
+  cliRC = cliInterface->restoreCQD("HIDE_INDEXES",CmpCommon::diags());
+  if (cliRC < 0)
+    {
+      if (processCleanupErrors(cliInterface, errorSeen))
+        return -1;
+    } 
   
   // OBJECTS index
   str_sprintf(query, "delete from table(index_table %s.\"%s\".%s) where \"OBJECT_UID@\" =
%Ld",

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/sqlcomp/DefaultConstants.h
----------------------------------------------------------------------
diff --git a/core/sql/sqlcomp/DefaultConstants.h b/core/sql/sqlcomp/DefaultConstants.h
index 20193f2..c702b8f 100644
--- a/core/sql/sqlcomp/DefaultConstants.h
+++ b/core/sql/sqlcomp/DefaultConstants.h
@@ -37,7 +37,7 @@
  *
  ***************************************************************************** */
 
-// This enum contains defaults used in SQLARK.
+// This enum contains defaults used in Trafodion.
 // To add a default, include it in this enum and in DefaultDefaults of
 // sqlcomp/NADefaults.cpp.
 //
@@ -54,7 +54,6 @@
 // ***************************************************************************
 // ***************************************************************************
 // NOTE: DO NOT make this enum non-contiguous.
-// Add new defaults after the last entry in this enum.
 // Valid attributes must begin at zero and there must be no holes.
 // The algorithm to read defaults from the defaults table requires this.
 // You must always add a default default in NADefaults.cpp, otherwise
@@ -3761,6 +3760,13 @@ enum DefaultConstants
 
   USTAT_MAX_CHAR_DATASIZE_FOR_IS,
 
+  // If the next two are 'ON' we use the HBase costing code; if they
+  // are 'OFF' we use a stub cost of 1 for Updates and Deletes to
+  // Trafodion or HBase tables instead. We'll remove these once the
+  // costing code has broader exposure.
+  HBASE_DELETE_COSTING,
+  HBASE_UPDATE_COSTING,
+
   // This enum constant must be the LAST one in the list; it's a count,
   // not an Attribute (it's not IN DefaultDefaults; it's the SIZE of it)!
   __NUM_DEFAULT_ATTRIBUTES

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8e0b3d44/core/sql/sqlcomp/nadefaults.cpp
----------------------------------------------------------------------
diff --git a/core/sql/sqlcomp/nadefaults.cpp b/core/sql/sqlcomp/nadefaults.cpp
index 107c23f..9e23cfe 100644
--- a/core/sql/sqlcomp/nadefaults.cpp
+++ b/core/sql/sqlcomp/nadefaults.cpp
@@ -1750,6 +1750,11 @@ SDDkwd__(EXE_DIAGNOSTIC_EVENTS,		"OFF"),
 
  DDkwd__(HBASE_DATA_BLOCK_ENCODING_OPTION,		             ""),
 
+ // If set to 'OFF' we get a stub cost of 1 for delete operations.
+ // We can remove this once the delete costing code has broader
+ // exposure.
+ DDkwd__(HBASE_DELETE_COSTING,		             "ON"),
+
  DDkwd__(HBASE_FILTER_PREDS,		             "OFF"),
  DDkwd__(HBASE_HASH2_PARTITIONING,                   "ON"),
  DDui___(HBASE_INDEX_LEVEL,                          "0"),
@@ -1780,6 +1785,13 @@ SDDkwd__(EXE_DIAGNOSTIC_EVENTS,		"OFF"),
   DDkwd__(HBASE_SQL_IUD_SEMANTICS,		"ON"),
   DDkwd__(HBASE_STATS_PARTITIONING,           	"ON"),
   DDkwd__(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT,		"OFF"),
+
+  // If set to 'OFF' we get a stub cost of 1 for update operations.
+  // We can remove this once the update costing code has broader
+  // exposure. This is 'OFF' at the moment because the update code
+  // is only partially written.
+  DDkwd__(HBASE_UPDATE_COSTING,		             "OFF"),
+
   DDkwd__(HBASE_UPDEL_CURSOR_OPT,		"ON"),
   DDui___(HBASE_USE_FAKED_REGIONS,		"0"),
   DD_____(HBASE_ZOOKEEPER_PORT,                 ""),



Mime
View raw message