db-derby-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From andre...@apache.org
Subject svn commit: r423034 - in /db/derby/code/trunk/java/engine/org/apache/derby: iapi/store/access/ iapi/types/ impl/sql/execute/
Date Tue, 18 Jul 2006 10:02:25 GMT
Author: andreask
Date: Tue Jul 18 03:02:24 2006
New Revision: 423034

URL: http://svn.apache.org/viewvc?rev=423034&view=rev
Log:
DERBY-802 OutOfMemoryError when using BLOB from scrollable insensitive result sets. The fix
is to avoid cloning the rows before inserting them into the BackingStoreHashTable.
The BackingStoreHashTable has been fixed so that it does not do unnecessary cloning. The estimateMemoryUsage
methods of SQLBinary and its subclasses have been improved to return a better estimate, so
that rows with BLOBs will be backed to disk when using BackingStoreHashTable. Tests have been
committed separately.

Modified:
    db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java
    db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java
    db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBinary.java
    db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBit.java
    db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBlob.java
    db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLLongVarbit.java
    db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLVarbit.java
    db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ProjectRestrictResultSet.java
    db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java

Modified: db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java
(original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java
Tue Jul 18 03:02:24 2006
@@ -279,16 +279,8 @@
                     double rowUsage = getEstimatedMemUsage(row);
                     hash_table = new Hashtable((int)(max_inmemory_size / rowUsage));
                 }
-
-                if (needsToClone)
-                {
-                    row = cloneRow(row);
-                }
-
-                Object key = 
-                    KeyHasher.buildHashKey(row, key_column_numbers);
-
-                add_row_to_hash_table(hash_table, key, row);
+               
+                add_row_to_hash_table(hash_table, row, needsToClone);
             }
         }
 
@@ -379,23 +371,52 @@
     }
 
     /**
+     * Return a shallow cloned row
+     *
+     * @return The cloned row row to use.
+     *
+     * @exception  StandardException  Standard exception policy.
+     **/
+    static DataValueDescriptor[] shallowCloneRow(DataValueDescriptor[] old_row)
+        throws StandardException
+    {
+        DataValueDescriptor[] new_row = new DataValueDescriptor[old_row.length];
+        // the only difference between getClone and cloneObject is cloneObject does
+        // not objectify a stream.  We use cloneObject here.  DERBY-802
+        for (int i = 0; i < old_row.length; i++)
+        {
+            if( old_row[i] != null)
+                new_row[i] = (DataValueDescriptor) 
+                    ((CloneableObject) old_row[i]).cloneObject();
+        }
+
+        return(new_row);
+    }
+
+    /**
      * Do the work to add one row to the hash table.
      * <p>
      *
      * @param row               Row to add to the hash table.
      * @param hash_table        The java HashTable to load into.
+     * @param needsToClone      If the row needs to be cloned
      *
 	 * @exception  StandardException  Standard exception policy.
      **/
     private void add_row_to_hash_table(
     Hashtable   hash_table,
-    Object      key,
-    Object[]    row)
+    Object[]    row,
+    boolean needsToClone )
 		throws StandardException
     {
-        if( spillToDisk( hash_table, key, row))
+        if( spillToDisk( hash_table, row))
             return;
         
+        if (needsToClone)
+        {
+            row = cloneRow(row);
+        }
+        Object key = KeyHasher.buildHashKey(row, key_column_numbers);
         Object  duplicate_value = null;
 
         if ((duplicate_value = hash_table.put(key, row)) == null)
@@ -451,7 +472,6 @@
      * Determine whether a new row should be spilled to disk and, if so, do it.
      *
      * @param hash_table The in-memory hash table
-     * @param key The row's key
      * @param row
      *
      * @return true if the row was spilled to disk, false if not
@@ -459,7 +479,6 @@
      * @exception  StandardException  Standard exception policy.
      */
     private boolean spillToDisk( Hashtable   hash_table,
-                                 Object      key,
                                  Object[]    row)
 		throws StandardException
     {
@@ -472,7 +491,8 @@
                 if( inmemory_rowcnt < max_inmemory_rowcnt)
                     return false; // Do not spill
             }
-            else if( max_inmemory_size > 0)
+            else if( max_inmemory_size > getEstimatedMemUsage(row))
+                
                 return false;
             // Want to start spilling
             if( ! (row instanceof DataValueDescriptor[]))
@@ -488,7 +508,7 @@
                                                remove_duplicates,
                                                keepAfterCommit);
         }
-        
+        Object key = KeyHasher.buildHashKey(row, key_column_numbers);
         Object duplicateValue = hash_table.get( key);
         if( duplicateValue != null)
         {
@@ -727,11 +747,6 @@
 			}
 		}
 
-        if (needsToClone)
-        {
-            row = cloneRow(row);
-        }
-
         Object key = KeyHasher.buildHashKey(row, key_column_numbers);
 
         if ((remove_duplicates) && (get(key) != null))
@@ -740,7 +755,7 @@
         }
         else
         {
-            add_row_to_hash_table(hash_table, key, row);
+            add_row_to_hash_table(hash_table, row, needsToClone);
             return(true);
         }
     }

Modified: db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java
(original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java
Tue Jul 18 03:02:24 2006
@@ -206,8 +206,10 @@
                         return this;
 
                     rowCount++;
-                    if( rowCount == 1)
-                        retValue = BackingStoreHashtable.cloneRow( row);
+                    if( rowCount == 1) 
+                    {
+                        retValue = BackingStoreHashtable.shallowCloneRow( row);         
              
+                    } 
                     else 
                     {
                         Vector v;
@@ -218,8 +220,10 @@
                             retValue = v;
                         }
                         else
+                        {
                             v = (Vector) retValue;
-                        v.add( BackingStoreHashtable.cloneRow( row));
+                        }
+                        v.add( BackingStoreHashtable.shallowCloneRow( row));
                     }
                     if( remove)
                     {
@@ -348,7 +352,7 @@
             try
             {
                 scan.fetch( row);
-                Object retValue =  BackingStoreHashtable.cloneRow( row);
+                Object retValue =  BackingStoreHashtable.shallowCloneRow( row);
                 hasMore = scan.next();
                 if( ! hasMore)
                 {

Modified: db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBinary.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBinary.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBinary.java (original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBinary.java Tue Jul 18
03:02:24 2006
@@ -109,14 +109,23 @@
 
     public int estimateMemoryUsage()
     {
-        int sz = BASE_MEMORY_USAGE;
-        if( null != dataValue)
-            sz += dataValue.length;
-        return sz;
+        if (dataValue == null) {
+            if (streamValueLength>=0) {
+                return BASE_MEMORY_USAGE + streamValueLength;
+            } else {
+                return getMaxMemoryUsage();
+            }
+        } else {
+            return BASE_MEMORY_USAGE + dataValue.length;
+        }
     } // end of estimateMemoryUsage
-
 	  
 	  
+	/**
+	 * Return max memory usage for a SQL Binary
+	 */
+	abstract int getMaxMemoryUsage();
+
 	 /*
 	 * object state
 	 */
@@ -502,7 +511,7 @@
 		if (stream == null)
 			return getClone();
 		SQLBinary self = (SQLBinary) getNewNull();
-		self.setStream(stream);
+		self.setValue(stream, streamValueLength);
 		return self;
 	}
 

Modified: db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBit.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBit.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBit.java (original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBit.java Tue Jul 18 03:02:24
2006
@@ -21,6 +21,7 @@
 package org.apache.derby.iapi.types;
 
 import org.apache.derby.iapi.reference.SQLState;
+import org.apache.derby.iapi.reference.Limits;
 
 import org.apache.derby.iapi.services.io.ArrayInputStream;
 
@@ -77,6 +78,14 @@
 	public String getTypeName()
 	{
 		return TypeId.BIT_NAME;
+	}
+
+	/**
+	 * Return max memory usage for a SQL Bit
+	 */
+	int getMaxMemoryUsage()
+	{
+		return Limits.DB2_CHAR_MAXWIDTH;
 	}
 
 	/*

Modified: db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBlob.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBlob.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBlob.java (original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLBlob.java Tue Jul 18 03:02:24
2006
@@ -26,6 +26,7 @@
 import org.apache.derby.iapi.types.BitDataValue;
 import org.apache.derby.iapi.types.DataValueDescriptor;
 import org.apache.derby.iapi.reference.SQLState;
+import org.apache.derby.iapi.reference.Limits;
 import org.apache.derby.iapi.error.StandardException;
 
 import org.apache.derby.iapi.types.Orderable;
@@ -80,6 +81,14 @@
         {
 			return TypeId.BLOB_NAME;
         }
+
+	/**
+	 * Return max memory usage for a SQL Blob
+	 */
+	int getMaxMemoryUsage()
+	{
+		return Limits.DB2_LOB_MAXWIDTH;
+	}
 
     /**
      * @see DataValueDescriptor#getNewNull

Modified: db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLLongVarbit.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLLongVarbit.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLLongVarbit.java (original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLLongVarbit.java Tue Jul
18 03:02:24 2006
@@ -26,6 +26,7 @@
 import org.apache.derby.iapi.types.BitDataValue;
 import org.apache.derby.iapi.types.DataValueDescriptor;
 import org.apache.derby.iapi.reference.SQLState;
+import org.apache.derby.iapi.reference.Limits;
 import org.apache.derby.iapi.error.StandardException;
 
 import org.apache.derby.iapi.types.Orderable;
@@ -56,6 +57,14 @@
 	public String getTypeName()
 	{
 		return TypeId.LONGVARBIT_NAME;
+	}
+
+	/**
+	 * Return max memory usage for a SQL LongVarbit
+	 */
+	int getMaxMemoryUsage()
+	{
+		return Limits.DB2_LONGVARCHAR_MAXWIDTH;
 	}
 
 	/**

Modified: db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLVarbit.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLVarbit.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLVarbit.java (original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/iapi/types/SQLVarbit.java Tue Jul 18
03:02:24 2006
@@ -26,6 +26,7 @@
 import org.apache.derby.iapi.types.BitDataValue;
 import org.apache.derby.iapi.types.DataValueDescriptor;
 import org.apache.derby.iapi.reference.SQLState;
+import org.apache.derby.iapi.reference.Limits;
 import org.apache.derby.iapi.error.StandardException;
 
 import org.apache.derby.iapi.types.Orderable;
@@ -58,6 +59,14 @@
 	public String getTypeName()
 	{
 		return TypeId.VARBIT_NAME;
+	}
+
+	/**
+	 * Return max memory usage for a SQL Varbit
+	 */
+	int getMaxMemoryUsage()
+	{
+		return Limits.DB2_VARCHAR_MAXWIDTH;
 	}
 
 	/**

Modified: db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ProjectRestrictResultSet.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ProjectRestrictResultSet.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ProjectRestrictResultSet.java
(original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ProjectRestrictResultSet.java
Tue Jul 18 03:02:24 2006
@@ -532,15 +532,40 @@
 	public ExecRow doBaseRowProjection(ExecRow sourceRow)
 		throws StandardException
 	{
-		ExecRow result = null;
+		final ExecRow result;
 		if (source instanceof ProjectRestrictResultSet) {
 			ProjectRestrictResultSet prs = (ProjectRestrictResultSet) source;
 			result = prs.doBaseRowProjection(sourceRow);
 		} else {
-			result = sourceRow.getClone();
+			result = sourceRow.getNewNullRow();
+			result.setRowArray(sourceRow.getRowArray());
 		}
 		return doProjection(result);
 	}
+
+	/**
+	 * Get projection mapping array. The array consist of indexes which
+	 * maps the column in a row array to another position in the row array.
+	 * If the value is projected out of the row, the value is negative.
+	 * @return projection mapping array.
+	 */
+	public int[] getBaseProjectMapping() 
+	{
+		final int[] result;
+		if (source instanceof ProjectRestrictResultSet) {
+			result = new int[projectMapping.length];
+			final ProjectRestrictResultSet prs = (ProjectRestrictResultSet) source;
+			final int[] sourceMap = prs.getBaseProjectMapping();
+			for (int i=0; i<projectMapping.length; i++) {
+				if (projectMapping[i] > 0) {
+					result[i] = sourceMap[projectMapping[i] - 1];
+				}
+			}
+		} else {
+			result = projectMapping;
+		}
+		return result;
+	} 
 	
 	/**
 	 * Is this ResultSet or it's source result set for update

Modified: db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java
URL: http://svn.apache.org/viewvc/db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java?rev=423034&r1=423033&r2=423034&view=diff
==============================================================================
--- db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java
(original)
+++ db/derby/code/trunk/java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java
Tue Jul 18 03:02:24 2006
@@ -229,9 +229,8 @@
 		 * The 1st column, the position in the
 		 * scan, will be the key column.
 		 */
-		int[] keyCols = new int[1];
-		// keyCols[0] = 0; // not req. arrays initialized to zero
-
+		final int[] keyCols = new int[] { 0 };
+		
 		/* We don't use the optimizer row count for this because it could be
 		 * wildly pessimistic.  We only use Hash tables when the optimizer row count
 		 * is within certain bounds.  We have no alternative for scrolling insensitive 
@@ -992,12 +991,12 @@
 		 * and we do our own cloning since the 1st column
 		 * is not a wrapper.
 		 */
-		DataValueDescriptor[] sourceRowArray = sourceRow.getRowArrayClone();
+		DataValueDescriptor[] sourceRowArray = sourceRow.getRowArray();
 
 		System.arraycopy(sourceRowArray, 0, hashRowArray, extraColumns, 
 				sourceRowArray.length);
 
-		ht.put(false, hashRowArray);
+		ht.put(true, hashRowArray);
 
 		numToHashTable++;
 	}
@@ -1058,6 +1057,31 @@
 
 		return resultRow;
 	}
+	
+	/**
+	 * Get the row data at the specified position 
+	 * from the hash table.
+	 *
+	 * @param position	The specified position.
+	 *
+	 * @return	The row data at that position.
+	 *
+ 	 * @exception StandardException thrown on failure 
+	 */
+	private DataValueDescriptor[] getRowArrayFromHashTable(int position)
+		throws StandardException
+	{
+		positionInHashTable.setValue(position);
+		final DataValueDescriptor[] hashRowArray = (DataValueDescriptor[]) 
+			ht.get(positionInHashTable);
+		
+		// Copy out the Object[] without the position.
+		final DataValueDescriptor[] resultRowArray = new 
+			DataValueDescriptor[hashRowArray.length - extraColumns];
+		System.arraycopy(hashRowArray, extraColumns, resultRowArray, 0, 
+						 resultRowArray.length);
+		return resultRowArray;
+	}
 
 	/**
 	 * Positions the cursor in the last fetched row. This is done before
@@ -1082,10 +1106,13 @@
 	 * in the hash table with the new values for the row.
 	 */
 	public void updateRow(ExecRow row) throws StandardException {
-		ExecRow newRow = row.getClone();
+		ExecRow newRow = row;
+		boolean undoProjection = false;
+		
 		if (source instanceof ProjectRestrictResultSet) {
 			newRow = ((ProjectRestrictResultSet)source).
-					doBaseRowProjection(newRow);
+				doBaseRowProjection(row);
+			undoProjection = true;
 		}
 		positionInHashTable.setValue(currentPosition);
 		DataValueDescriptor[] hashRowArray = (DataValueDescriptor[]) 
@@ -1093,6 +1120,32 @@
 		RowLocation rowLoc = (RowLocation) hashRowArray[POS_ROWLOCATION];
 		ht.remove(new SQLInteger(currentPosition));
 		addRowToHashTable(newRow, currentPosition, rowLoc, true);
+		
+		// Modify row to refer to data in the BackingStoreHashtable.
+		// This allows reading of data which goes over multiple pages
+		// when doing the actual update (LOBs). Putting columns of
+		// type SQLBinary to disk, has destructive effect on the columns,
+		// and they need to be re-read. That is the reason this is needed.
+		if (undoProjection) {
+			
+			final DataValueDescriptor[] newRowData = newRow.getRowArray();
+			
+			// Array of original position in row
+			final int[] origPos =((ProjectRestrictResultSet)source).
+				getBaseProjectMapping(); 
+			
+			// We want the row to contain data backed in BackingStoreHashtable
+			final DataValueDescriptor[] backedData = 
+				getRowArrayFromHashTable(currentPosition);
+			
+			for (int i=0; i<origPos.length; i++) {
+				if (origPos[i]>=0) {
+					row.setColumn(origPos[i], backedData[i]);
+				}
+			}
+		} else {
+			row.setRowArray(getRowArrayFromHashTable(currentPosition));
+		}
 	}
 
 	/**
@@ -1112,7 +1165,7 @@
 			hashRowArray[i].setToNull();
 		}
 
-		ht.put(false, hashRowArray);
+		ht.put(true, hashRowArray);
 	}
 
 	/**



Mime
View raw message