db-derby-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mi...@apache.org
Subject svn commit: r169492 - in /incubator/derby/code/trunk/java: engine/org/apache/derby/iapi/db/ engine/org/apache/derby/impl/store/access/btree/index/ engine/org/apache/derby/impl/store/access/heap/ engine/org/apache/derby/impl/store/raw/data/ engine/org/apache/derby/impl/store/raw/log/ testing/org/apache/derbyTesting/functionTests/master/ testing/org/apache/derbyTesting/functionTests/tests/store/
Date Tue, 10 May 2005 16:30:10 GMT
Author: mikem
Date: Tue May 10 09:30:08 2005
New Revision: 169492

URL: http://svn.apache.org/viewcvs?rev=169492&view=rev
Log:
DERBY-132, inplace compress.  Add btree truncate support.  Enhance OnlineCompressTest.java test.


Modified:
    incubator/derby/code/trunk/java/engine/org/apache/derby/iapi/db/OnlineCompress.java
    incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2I.java
    incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking1.java
    incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java
    incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLockingRR.java
    incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/heap/Heap.java
    incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/data/StoredPage.java
    incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/log/CheckpointOperation.java
    incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/master/OnlineCompressTest.out
    incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/BaseTest.java
    incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest.java
    incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest_derby.properties

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/iapi/db/OnlineCompress.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/iapi/db/OnlineCompress.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/iapi/db/OnlineCompress.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/iapi/db/OnlineCompress.java Tue May 10 09:30:08 2005
@@ -98,6 +98,10 @@
 increase the number of pages affected.  This option itself does no scans of
 the table, so performs on the order of a few system calls.
 
+TODO LIST:
+o defragment requires table level lock in nested user transaction, which
+  will conflict with user lock on same table in user transaction.
+
 **/
 public class OnlineCompress
 {

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2I.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2I.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2I.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2I.java Tue May 10 09:30:08 2005
@@ -791,7 +791,35 @@
     Transaction                     rawtran)
         throws StandardException
     {
-        // TODO - need to implement for btree
+		B2IController b2ic = new B2IController();
+
+		try
+		{
+            int open_mode = TransactionController.OPENMODE_FORUPDATE;
+
+            // Do the actual open of the container in the super class.
+            b2ic.init(
+                xact_manager,                    // current transaction   
+                xact_manager.getRawStoreXact(),  // current raw store xact
+                open_mode,
+                TransactionController.MODE_TABLE,
+                xact_manager.getRawStoreXact().newLockingPolicy(
+                    LockingPolicy.MODE_CONTAINER,
+                    TransactionController.ISOLATION_SERIALIZABLE, true),
+                true,
+                this, 
+                new B2IUndo(),
+                (B2IStaticCompiledInfo) null,
+                (DynamicCompiledOpenConglomInfo) null);
+
+            b2ic.getContainer().compressContainer();
+
+		}
+		finally
+		{
+			b2ic.close();
+		}
+
         return;
     }
 

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking1.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking1.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking1.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking1.java Tue May 10 09:30:08 2005
@@ -128,11 +128,10 @@
      * @param open_btree        The open_btree to associate latches with - 
      *                          used if routine has to scan backward.
      * @param btree             the conglomerate info.
-     * @param leaf              The control row of the current leaf to lock.
-     * @param slot              The slot position of the row to lock.
+     * @param pos               The position of the row to lock.
      * @param request_scan_lock Whether to request the page scan lock, should
      *                          only be requested once per page in the scan.
-     * @param scratch_template  A scratch area to use to read in rows.
+     * @param lock_template     A scratch area to use to read in rows.
      * @param previous_key_lock Is this a previous key lock call?
      * @param forUpdate         Is the scan for update or for read only.
      *

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java Tue May 10 09:30:08 2005
@@ -159,10 +159,9 @@
      * have 2 different containerid's, be more creative with the record id, or
      * even add more to the lock key.
      *
-     * @param open_btree        The open btree to associate this lock with.
      * @param aux_leaf          If non-null, this leaf is unlatched if the 
      *                          routine has to wait on the lock.
-     * @param forUpdate         Whether to lock exclusive or share.
+     * @param lock_operation    Whether to lock exclusive or share.
      * @param lock_duration     For what duration should the lock be held,
      *                          if INSTANT_DURATION, then the routine will
      *                          guarantee that lock was acquired while holding
@@ -247,8 +246,7 @@
      *                          used for locking.
      * @param check_changed_rowloc
      *                          whether to check for the changed rowloc or not.
-     * @param forUpdate         Whether to wait for lock.
-     * @param forUpdatePrevKey  Whether lock is for key prev to insert or not.
+     * @param lock_operation    Whether lock is for key prev to insert or not.
      * @param lock_duration     For what duration should the lock be held,
      *                          if INSTANT_DURATION, then the routine will
      *                          guarantee that lock was acquired while holding
@@ -499,14 +497,14 @@
      *                          used if routine has to scan backward.
      * @param btree             the conglomerate info.
      * @param leaf              The control row of the current leaf to lock.
-     * @param slot              The slot position of the row to lock.
+     * @param pos               The position of the row to lock.
      * @param request_row_lock  Whether to request the row lock, should
      *                          only be requested once per page in the scan.
      * @param request_scan_lock Whether to request the page scan lock, should
      *                          only be requested once per page in the scan.
-     * @param lock_fetchDescriptor The fetch descriptor to use to fetch the
+     * @param lock_fetch_desc   The fetch descriptor to use to fetch the
      *                          row location for the lock request.
-     * @param scratch_template  A scratch area to use to read in rows.
+     * @param lock_template     A scratch area to use to read in rows.
      * @param previous_key_lock Is this a previous key lock call?
      * @param forUpdate         Is the scan for update or for read only.
      *
@@ -803,11 +801,10 @@
      * @param open_btree        The open_btree to associate latches with - 
      *                          used if routine has to scan backward.
      * @param btree             the conglomerate info.
-     * @param leaf              The control row of the current leaf to lock.
-     * @param slot              The slot position of the row to lock.
+     * @param pos               The position of the row to lock.
      * @param request_scan_lock Whether to request the page scan lock, should
      *                          only be requested once per page in the scan.
-     * @param scratch_template  A scratch area to use to read in rows.
+     * @param lock_template     A scratch area to use to read in rows.
      * @param previous_key_lock Is this a previous key lock call?
      * @param forUpdate         Is the scan for update or for read only.
      *
@@ -846,7 +843,6 @@
      *
      * For serializable, there is no work to do.
      *
-     * @param row_qualified     Did the row qualify to be returned to caller.
      *
      **/
     public void unlockScanRecordAfterRead(

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLockingRR.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLockingRR.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLockingRR.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLockingRR.java Tue May 10 09:30:08 2005
@@ -92,11 +92,10 @@
      * @param open_btree        The open_btree to associate latches with - 
      *                          used if routine has to scan backward.
      * @param btree             the conglomerate info.
-     * @param leaf              The control row of the current leaf to lock.
-     * @param slot              The slot position of the row to lock.
+     * @param pos               The position of the row to lock.
      * @param request_scan_lock Whether to request the page scan lock, should
      *                          only be requested once per page in the scan.
-     * @param scratch_template  A scratch area to use to read in rows.
+     * @param lock_template     A scratch area to use to read in rows.
      * @param previous_key_lock Is this a previous key lock call?
      * @param forUpdate         Is the scan for update or for read only.
      *

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/heap/Heap.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/heap/Heap.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/heap/Heap.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/access/heap/Heap.java Tue May 10 09:30:08 2005
@@ -757,11 +757,13 @@
                 this,
                 this.format_ids,
                 nested_xact,
-                rawtran,
+                nested_xact.getRawStoreXact(),
                 true,
                 TransactionController.OPENMODE_FORUPDATE,
                 TransactionController.MODE_RECORD,
-                null,
+                nested_xact.getRawStoreXact().newLockingPolicy(
+                    LockingPolicy.MODE_RECORD,
+                        TransactionController.ISOLATION_REPEATABLE_READ, true),
                 null) == null)
             {
                 throw StandardException.newException(
@@ -791,6 +793,14 @@
                     // by a remove.
                     open_conglom.getXactMgr().commitNoSync(
                                 TransactionController.RELEASE_LOCKS);
+
+                    // the commit closes the underlying container, so let
+                    // the heapcontroller know this has happened.  Usually
+                    // the transaction takes care of this, but this controller
+                    // is internal, so the transaction does not know about it.
+                    heapcontroller.closeForEndTransaction(false);
+                    
+                    // the commit will close the underlying container.
                     open_conglom.reopen();
                 }
                 else
@@ -843,7 +853,9 @@
                     false,
                     TransactionController.OPENMODE_FORUPDATE,
                     TransactionController.MODE_RECORD,
-                    null,
+                    rawtran.newLockingPolicy(
+                        LockingPolicy.MODE_RECORD,
+                        TransactionController.ISOLATION_REPEATABLE_READ, true),
                     null) == null)
             {
                 throw StandardException.newException(
@@ -895,7 +907,9 @@
                 hold,
                 open_mode,
                 lock_level,
-                null,
+                rawtran.newLockingPolicy(
+                    LockingPolicy.MODE_RECORD,
+                    TransactionController.ISOLATION_REPEATABLE_READ, true),
                 null) == null)
         {
             throw StandardException.newException(

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/data/StoredPage.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/data/StoredPage.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/data/StoredPage.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/data/StoredPage.java Tue May 10 09:30:08 2005
@@ -2368,13 +2368,10 @@
      * Up here means from low slot to high slot (e.g from slot 2 to slot 3).
      * Our slot table grows backward so we have to be careful here.
      *
-     * @param param1 param1 does this.
-     * @param param2 param2 does this.
-     *
      * @param slot                  Position the new slot will take
      * @param recordOffset          Offset of the record for the new slot
      * @param recordPortionLength   Length of the record stored in the new slot
-     * @param recordPortionLength   Length of reserved space of record in slot
+     * @param reservedSpace         Length of reserved space of record in slot
      *
      **/
 	private void addSlotEntry(
@@ -4541,16 +4538,17 @@
      * <p>
      * The column is read into the object located in row[qual_colid].
      *
-     * @param row           column is read into object in row[qual_colid].
-     * @param colid         the column id to read, colid N is row[N]
-     * @param dataIn        the stream to read the column in row from.
-     * @param recordHeader  the record header of the row to read column from.
-     * @param recordToLock  record handle to lock, used by overflow column code.
+     * @param row                   col is read into object in row[qual_colid].
+     * @param offset_to_field_data  offset in bytes from top of page to field
+     * @param colid                 the column id to read, colid N is row[N]
+     * @param recordHeader          record header of row to read column from.
+     * @param recordToLock          record handle to lock, 
+     *                              used by overflow column code.
      *
 	 * @exception  StandardException  Standard exception policy.
      **/
 	private final void readOneColumnFromPage(
-    Object[]   row, 
+    Object[]                row, 
     int                     colid,
     int                     offset_to_field_data,
     StoredRecordHeader      recordHeader,
@@ -4899,18 +4897,15 @@
      *
 	 * @return Whether or not the row input qualifies.
      *
-     * @param row               restore row into this object array.
-     * @param validColumns      If not null, bit map indicates valid cols.
-     * @param materializedCols  Which columns have already been read into row?
-     * @param lrdi              restore row from this stream.
-     * @param recordHeader      The record header of the row, it was read in 
-     *                          from stream and dataIn is positioned after it.
-     * @param inUserCode        see comments above about jit bug. 
-     * @param qualifier_list    An array of qualifiers to apply to the row, only
-     *                          return row if qualifiers are all true, if array
-     *                          is null always return the row.
-     * @param recordToLock      The head row to use for locking, used to lock 
-     *                          head row of overflow columns/rows.
+     * @param row                   restore row into this object array.
+     * @param offset_to_row_data    offset in bytes from top of page to row
+     * @param fetchDesc             Description of fetch including which cols
+     *                              and qualifiers.
+     * @param recordHeader          The record header of the row, it was read 
+     *                              in from stream and dataIn is positioned 
+     *                              after it.
+     * @param recordToLock          The head row to use for locking, used to 
+     *                              lock head row of overflow columns/rows.
      *
 	 * @exception  StandardException  Standard exception policy.
      **/
@@ -7732,7 +7727,7 @@
 		Shift data within a record to account for an update.
 
		@param offset  Offset where the update starts, need not be on a field boundary.
-		@param oldLenght length of the data being replaced
+		@param oldLength length of the data being replaced
 		@param newLength length of the data replacing the old data
 
 		@return the length of the data in the record after the replaced data.
@@ -7952,7 +7947,6 @@
 		{
 			if (SanityManager.DEBUG_ON("DeadlockTrace") || SanityManager.DEBUG_ON("userLockStackTrace"))
 				return "page = " + getIdentity();
-
 
 			String str = "---------------------------------------------------\n";
 			str += pageHeaderToString();

Modified: incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/log/CheckpointOperation.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/log/CheckpointOperation.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/log/CheckpointOperation.java (original)
+++ incubator/derby/code/trunk/java/engine/org/apache/derby/impl/store/raw/log/CheckpointOperation.java Tue May 10 09:30:08 2005
@@ -142,9 +142,6 @@
 		the operation need to prepare the optional data for this method.
 
 		Checkpoint has no optional data to write out
-
-		@param out Where and how to write to optional data.
-		
 	*/
 	public ByteArray getPreparedLog()
 	{

Modified: incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/master/OnlineCompressTest.out
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/master/OnlineCompressTest.out?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/master/OnlineCompressTest.out (original)
+++ incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/master/OnlineCompressTest.out Tue May 10 09:30:08 2005
@@ -1,35 +1,42 @@
-conn 2 from ij.startJBMS() = EmbedConnection
 Beginning test: test1
-Executing test: begin 0 row test, create = true, drop = false.
+Executing test: begin deleteAllRows,0 row test, create = true.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 0 row test.
-Executing test: begin 0 row test, create = false, drop = true.
+Executing test: end deleteAllRows,0 row test.
+Executing test: begin deleteAllRows,0 row test, create = false.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 0 row test.
-Executing test: begin 1 row test, create = true, drop = false.
+Executing test: end deleteAllRows,0 row test.
+Executing test: begin checkPurgePhase0 row test, create = false.
+Executing test: end checkPurgePhase0 row test.
+Executing test: begin deleteAllRows,1 row test, create = true.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 1 row test.
-Executing test: begin 1 row test, create = false, drop = true.
+Executing test: end deleteAllRows,1 row test.
+Executing test: begin deleteAllRows,1 row test, create = false.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 1 row test.
-Executing test: begin 50 row test, create = true, drop = false.
+Executing test: end deleteAllRows,1 row test.
+Executing test: begin checkPurgePhase1 row test, create = false.
+Executing test: end checkPurgePhase1 row test.
+Executing test: begin deleteAllRows,50 row test, create = true.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 50 row test.
-Executing test: begin 50 row test, create = false, drop = true.
+Executing test: end deleteAllRows,50 row test.
+Executing test: begin deleteAllRows,50 row test, create = false.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 50 row test.
-Executing test: begin 10000 row test, create = true, drop = false.
+Executing test: end deleteAllRows,50 row test.
+Executing test: begin checkPurgePhase50 row test, create = false.
+Executing test: end checkPurgePhase50 row test.
+Executing test: begin deleteAllRows,4000 row test, create = true.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 10000 row test.
-Executing test: begin 10000 row test, create = false, drop = true.
+Executing test: end deleteAllRows,4000 row test.
+Executing test: begin deleteAllRows,4000 row test, create = false.
 Executing test: no delete case complete.
 Executing test: delete all rows case succeeded.
-Executing test: end 10000 row test.
+Executing test: end deleteAllRows,4000 row test.
+Executing test: begin checkPurgePhase4000 row test, create = false.
+Executing test: end checkPurgePhase4000 row test.
 Ending test: test1

Modified: incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/BaseTest.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/BaseTest.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/BaseTest.java (original)
+++ incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/BaseTest.java Tue May 10 09:30:08 2005
@@ -169,7 +169,8 @@
     String dump_table(
     Connection  conn,
     String      schemaName,
-    String      tableName)
+    String      tableName,
+    boolean     commit_transaction)
 		throws SQLException
     {
         if (!debug_system_procedures_created)
@@ -207,9 +208,93 @@
 
         rs.close();
 
-        conn.commit();
+        if (commit_transaction)
+            conn.commit();
 
         return(dump_table_info);
 
+    }
+
+    /**
+     * Get lock table.
+     * <p>
+     * Returns a single string with a dump of the entire lock table.
+     * <p>
+     *
+	 * @return The lock table.
+     *
+     * @param conn                  The connection to use.
+     * @param include_system_locks  If true include non-user locks like those
+     *                              requested by background internal threads.
+     *
+     **/
+    protected String get_lock_info(
+    Connection  conn,
+    boolean     include_system_locks)
+		throws SQLException
+    {
+        // Run the following query to get the current locks in the system,
+        // toggling the "t.type='UserTransaction'" based on 
+        // include_system_locks input:
+        //
+        // select
+        //     cast(l.xid as char(8)) as xid,
+        //     cast(username as char(8)) as username,
+        //     cast(t.type as char(8)) as trantype,
+        //     cast(l.type as char(8)) as type,
+        //     cast(lockcount as char(3)) as cnt,
+        //     cast(mode as char(4)) as mode,
+        //     cast(tablename as char(12)) as tabname,
+        //     cast(lockname as char(10)) as lockname,
+        //     state,
+        //     status
+        // from
+        //     new org.apache.derby.diag.LockTable() l  
+        // right outer join new org.apache.derby.diag.TransactionTable() t
+        //     on l.xid = t.xid where l.tableType <> 'S' and 
+        //        t.type='UserTransaction'
+        // order by
+        //     tabname, type desc, mode, cnt, lockname;
+        String lock_query = 
+            "select cast(l.xid as char(8)) as xid, cast(username as char(8)) as username, cast(t.type as char(8)) as trantype, cast(l.type as char(8)) as type, cast(lockcount as char(3)) as cnt, cast(mode as char(4)) as mode, cast(tablename as char(12)) as tabname, cast(lockname as char(10)) as lockname, state, status from new org.apache.derby.diag.LockTable() l right outer join new org.apache.derby.diag.TransactionTable() t on l.xid = t.xid where l.tableType <> 'S' ";
+        if (!include_system_locks)
+            lock_query += "and t.type='UserTransaction' ";
+        
+        lock_query += "order by tabname, type desc, mode, cnt, lockname";
+
+        PreparedStatement ps = conn.prepareStatement(lock_query);
+
+        ResultSet rs = ps.executeQuery();
+
+        String lock_output = 
+        "xid     |username|trantype|type    |cnt|mode|tabname     |lockname  |state|status\n" +
+        "---------------------------------------------------------------------------------\n";
+        while (rs.next())
+        {
+            String username     = rs.getString(1);
+            String trantype     = rs.getString(2);
+            String type         = rs.getString(3);
+            String lockcount    = rs.getString(4);
+            String mode         = rs.getString(5);
+            String tabname      = rs.getString(6);
+            String lockname     = rs.getString(7);
+            String state        = rs.getString(8);
+            String status       = rs.getString(9);
+
+            lock_output +=
+                username + "|" +
+                trantype + "|" +
+                type     + "|" +
+                lockcount+ "|" +
+                mode     + "|" +
+                tabname  + "|" +
+                lockname + "|" +
+                state    + "|" +
+                status   + "\n";
+        }
+
+        rs.close();
+
+        return(lock_output);
     }
 }

Modified: incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest.java?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest.java (original)
+++ incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest.java Tue May 10 09:30:08 2005
@@ -54,7 +54,8 @@
     String      tableName,
     boolean     purgeRows,
     boolean     defragmentRows,
-    boolean     truncateEnd)
+    boolean     truncateEnd,
+    boolean     commit_operation)
         throws SQLException
     {
         CallableStatement cstmt = 
@@ -68,7 +69,8 @@
 
         cstmt.execute();
 
-        conn.commit();
+        if (commit_operation)
+            conn.commit();
     }
 
     /**
@@ -96,7 +98,8 @@
     private int[] getSpaceInfo(
     Connection  conn,
     String      schemaName,
-    String      tableName)
+    String      tableName,
+    boolean     commit_xact)
 		throws SQLException
     {
         String stmt_str = 
@@ -146,52 +149,14 @@
 
         rs.close();
 
-        conn.commit();
+        if (commit_xact)
+            conn.commit();
 
         return(ret_info);
     }
 
 
     /**
-     * Determine if inplace compress did it's job.
-     * <p>
-     * Figuring out if inplace compress in a fully reproducible way is hard
-     * because derby has background threads which when given a chance do some
-     * of the space reclamation work that this routine does, so the absolute
-     * number of pages sometimes varies depending on machine/OS/JVM issues.
-     * <p>
-     * The approach here is to verify that at least N pages where reclaimed,
-     * assuming other varience is an acceptable difference based on background
-     * thread activity.  
-     * <p>
-     *
-	 * @return The identifier to be used to open the conglomerate later.
-     *
-     **/
-    private boolean checkBaseTableSpaceParameters(
-    Connection  conn,
-    String      schemaName,
-    String      tableName,
-    boolean     check_allocated_pages,
-    int         max_allocated_pages,
-    boolean     check_free_pages,
-    int         max_free_pages)
-		throws SQLException
-    {
-        int[] ret_info = getSpaceInfo(conn, schemaName, tableName);
-
-        int    is_index                 = ret_info[0];
-        int    num_alloc                = ret_info[1];
-        int    num_free                 = ret_info[2];
-        int    page_size                = ret_info[3];
-        int    estimate_space_savings   = ret_info[4];
-
-        return(true);
-    }
-
-
-
-    /**
      * Create and load a table.
      * <p>
      * If create_table is set creates a test data table with indexes.
@@ -278,12 +243,15 @@
 
     private void executeQuery(
     Connection  conn,
-    String      stmt_str)
+    String      stmt_str,
+    boolean     commit_query)
         throws SQLException
     {
         Statement stmt = conn.createStatement();
         stmt.executeUpdate(stmt_str);
-        conn.commit();
+        stmt.close();
+        if (commit_query)
+            conn.commit();
     }
 
     private void log_wrong_count(
@@ -297,21 +265,34 @@
     {
         System.out.println(error_msg);
         System.out.println("ERROR: for " + num_rows + " row  test. Expected " + expected_val + ", but got " + actual_val );
+        System.out.println("before_info:");
+        System.out.println(
+        "    IS_INDEX         =" + before_info[SPACE_INFO_IS_INDEX]     + 
+        "\n    NUM_ALLOC        =" + before_info[SPACE_INFO_NUM_ALLOC]    +
+        "\n    NUM_FREE         =" + before_info[SPACE_INFO_NUM_FREE]     +
+        "\n    PAGE_SIZE        =" + before_info[SPACE_INFO_PAGE_SIZE]    +
+        "\n    ESTIMSPACESAVING =" + before_info[SPACE_INFO_ESTIMSPACESAVING]);
+        System.out.println("after_info:");
+        System.out.println(
+        "    IS_INDEX         =" + after_info[SPACE_INFO_IS_INDEX]     + 
+        "\n    NUM_ALLOC        =" + after_info[SPACE_INFO_NUM_ALLOC]    +
+        "\n    NUM_FREE         =" + after_info[SPACE_INFO_NUM_FREE]     +
+        "\n    PAGE_SIZE        =" + after_info[SPACE_INFO_PAGE_SIZE]    +
+        "\n    ESTIMSPACESAVING =" + after_info[SPACE_INFO_ESTIMSPACESAVING]);
     }
 
 
-    private void row_count_based_tests(
+    private void deleteAllRows(
     Connection  conn,
     boolean     create_table,
-    boolean     drop_table,
     String      schemaName,
     String      table_name,
     int         num_rows) 
         throws SQLException 
     {
         testProgress(
-            "begin " + num_rows + " row test, create = " + 
-                create_table + ", drop = " + drop_table + ".");
+            "begin deleteAllRows," + num_rows + " row test, create = " + 
+                create_table + ".");
 
 
         createAndLoadTable(conn, create_table, table_name, num_rows);
@@ -320,9 +301,9 @@
             testProgress("Calling compress.");
 
         // compress with no deletes should not affect size
-        int[] ret_before = getSpaceInfo(conn, "APP", table_name);
-        callCompress(conn, "APP", table_name, true, true, true);
-        int[] ret_after  = getSpaceInfo(conn, "APP", table_name);
+        int[] ret_before = getSpaceInfo(conn, "APP", table_name, true);
+        callCompress(conn, "APP", table_name, true, true, true, true);
+        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, true);
 
         if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
         {
@@ -345,15 +326,14 @@
         testProgress("no delete case complete.");
 
         // delete all the rows.
-        ret_before = getSpaceInfo(conn, "APP", table_name);
-        executeQuery(conn, "delete from " + table_name);
-        conn.commit();
+        ret_before = getSpaceInfo(conn, "APP", table_name, true);
+        executeQuery(conn, "delete from " + table_name, true);
 
         if (verbose)
             testProgress("deleted all rows, now calling compress.");
 
-        callCompress(conn, "APP", table_name, true, true, true);
-        ret_after  = getSpaceInfo(conn, "APP", table_name);
+        callCompress(conn, "APP", table_name, true, true, true, true);
+        ret_after  = getSpaceInfo(conn, "APP", table_name, true);
 
         // An empty table has 2 pages, one allocation page and the 1st page
         // which will have a system row in it.  The space vti only reports
@@ -376,54 +356,375 @@
 
         testProgress("delete all rows case succeeded.");
 
+        conn.commit();
+
+        testProgress("end deleteAllRows," + num_rows + " row test.");
+    }
+
+    /**
+     * Check/exercise purge pass phase.
+     * <p>
+     * Assumes that either test creates the table, or called on an empty
+     * table with no committed deleted rows or free pages in the middle of
+     * the table in it.
+     * <p>
+     *
+	 * @exception  StandardException  Standard exception policy.
+     **/
+    private void checkPurgePhase(
+    Connection  conn,
+    boolean     create_table,
+    String      schemaName,
+    String      table_name,
+    int         num_rows) 
+        throws SQLException 
+    {
+        testProgress(
+            "begin checkPurgePhase" + num_rows + " row test, create = " + 
+                create_table + ".");
+
+        createAndLoadTable(conn, create_table, table_name, num_rows);
+
+        // dump_table(conn, schemaName, table_name, false);
+
+        // delete all the rows, but don't commit the delete
+        int[] ret_before = getSpaceInfo(conn, "APP", table_name, false);
+        executeQuery(conn, "delete from " + table_name, false);
+
+
+        // dump_table(conn, schemaName, table_name, false);
+
+        // Purge pass on non-committed deleted rows should do nothing.  
+
+        // System.out.println("lock info before compress call:\n " + get_lock_info(conn, true));
+
+        // Calling compress with just the "purge" pass option, no commit called.
+        callCompress(conn, "APP", table_name, true, false, false, false);
+
+        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, false);
+
+        // expect no change in the number of allocated pages!
+        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
+        {
+            log_wrong_count(
+                "Expected no alloc page change(1).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+
+        // expect no change in the number of free pages; if there is a change,
+        // there is a problem with purge locking recognizing committed deleted rows.
+        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
+        {
+            log_wrong_count(
+                "Expected no free page change(1).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_FREE], 
+                ret_after[SPACE_INFO_NUM_FREE],
+                ret_before, ret_after);
+        }
+
+        // Test that it is ok to call multiple purge passes in single xact.
+
+        // Calling compress with just the "purge" pass option, no commit called.
+        callCompress(conn, "APP", table_name, true, false, false, false);
+        ret_after  = getSpaceInfo(conn, "APP", table_name, false);
 
-        if (drop_table)
-            executeQuery(conn, "drop table " + table_name);
+        // expect no change in the number of allocated pages!
+        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
+        {
+            log_wrong_count(
+                "Expected no alloc page change(2).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+
+        // expect no change in the number of free pages; if there is a change,
+        // there is a problem with purge locking recognizing committed deleted rows.
+        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
+        {
+            log_wrong_count(
+                "Expected no free page change(2).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_FREE], 
+                ret_after[SPACE_INFO_NUM_FREE],
+                ret_before, ret_after);
+        }
 
+        // since the table was just loaded, a defragment pass also should
+        // not find anything to do.
+        
+        // Calling compress with just the "defragment" option, no commit called.
+
+        // currently the defragment option requires a table level lock in
+        // the nested user transaction, which will conflict and cause a
+        // lock timeout.
+
+        try
+        {
+            callCompress(conn, "APP", table_name, false, true, false, false);
+            
+            logError("Defragment pass did not get a lock timeout.");
+        }
+        catch (SQLException sqle)
+        {
+            // ignore exception.
+        }
+
+        ret_after  = getSpaceInfo(conn, "APP", table_name, false);
+
+        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
+        {
+            log_wrong_count(
+                "Expected no alloc page change(3).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
+        {
+            log_wrong_count(
+                "Expected no free page change(3).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_FREE], 
+                ret_after[SPACE_INFO_NUM_FREE],
+                ret_before, ret_after);
+        }
+
+
+        // make sure table is back to all deleted row state.  lock timeout
+        // will abort transaction.
+        executeQuery(conn, "delete from " + table_name, true);
+        callCompress(conn, "APP", table_name, true, true, true, true);
+        createAndLoadTable(conn, create_table, table_name, num_rows);
         conn.commit();
+        executeQuery(conn, "delete from " + table_name, false);
+
+
+        // Calling compress with just the truncate option, may change allocated
+        // and free page count as they system may have preallocated pages to
+        // the end of the file as part of the load.  The file can't shrink
+        // any more than the free page count before the compress.
+
+        // running the truncate pass only.  If it compresses anything it is
+        // just the preallocated pages at end of the file.
 
-        testProgress("end " + num_rows + " row test.");
+        // currently the defragment option requires a table level lock in
+        // the nested user transaction, which will conflict and cause a
+        // lock timeout.
+
+
+        callCompress(conn, "APP", table_name, false, false, true, false);
+        ret_after  = getSpaceInfo(conn, "APP", table_name, false);
+
+        // expect no change in the number of allocated pages!
+        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
+        {
+            log_wrong_count(
+                "Expected no alloc page change(3).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+
+        // expect no change in the number of free pages; if there is a change,
+        // there is a problem with purge locking recognizing committed deleted rows.
+        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
+        {
+            log_wrong_count(
+                "Expected no free page change(3).", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_FREE], 
+                ret_after[SPACE_INFO_NUM_FREE],
+                ret_before, ret_after);
+        }
+
+        // now commit the deletes, run all phases and make sure empty table
+        // results.
+        conn.commit();
+
+        // check the table.  Note that this will accumulate locks and
+        // will commit the transaction.
+        if (!checkConsistency(conn, schemaName, table_name))
+        {
+            logError("conistency check failed.");
+        }
+
+        // test running each phase in order.
+        callCompress(conn, "APP", table_name, true,  false, false, false);
+        callCompress(conn, "APP", table_name, false, true,  false, false);
+        callCompress(conn, "APP", table_name, false, false, true , false);
+        ret_after  = getSpaceInfo(conn, "APP", table_name, false);
+
+        // An empty table has 2 pages, one allocation page and the 1st page
+        // which will have a system row in it.  The space vti only reports
+        // a count of the user pages so the count is 1.
+        if (ret_after[SPACE_INFO_NUM_ALLOC] != 1)
+        {
+            log_wrong_count(
+                "Expected all pages to be truncated.",
+                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+        if (ret_after[SPACE_INFO_NUM_FREE] != 0)
+        {
+            log_wrong_count(
+                "Expected no free page after all pages truncated.",
+                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+
+        if (verbose)
+            testProgress("calling consistency checker.");
+
+        if (!checkConsistency(conn, schemaName, table_name))
+        {
+            logError("conistency check failed.");
+        }
+
+        testProgress("end checkPurgePhase" + num_rows + " row test.");
     }
 
     /**
      * Test 1 alloc page test cases.
      * <p>
+     * perform a number of insert/delete/compress operations on a variety
+     * of sized tables, use space allocation information to verify that
+     * compression is happening and use consistency checker to verify that
+     * tables and indexes are all valid following the operations.
+     * <p>
      * loop through testing interesting row count cases.  The cases are
-     * 0  rows     - basic edge case, 2 page table: 1 alloc, 1 user page
-     * 1  row      - another edge case, 2 page table: 1 alloc, 1 user page
-     * 50 rows     - 3 page table case: 1 alloc, 1 user page, 1 user page freed
-     * 10000 rows  - reasonable number of pages to test out, still 1 alloc page
+     * 0    rows  - basic edge case, 2 page table: 1 alloc, 1 user page
+     * 1    row   - another edge case, 2 page table: 1 alloc, 1 user page
+     * 50   rows  - 3 page table case: 1 alloc, 1 user page, 1 user page freed
+     * 4000 rows  - reasonable number of pages to test out, still 1 alloc page
+     *
+     * note that row numbers greater than 4000 may lead to lock escalation
+     * issues, if queries like "delete from x" are used to delete all the 
+     * rows.
      *
-     * These tests can be run relatively quickly, not a lot of rows needed.
      * <p>
      *
      **/
-    private void test1(Connection conn) 
+    private void test1(
+    Connection  conn,
+    String      test_name,
+    String      table_name)
         throws SQLException 
     {
-        beginTest(conn, "test1");
+        beginTest(conn, test_name);
 
-        int[] test_cases = {0, 1, 50, 10000};
+        int[] test_cases = {0, 1, 50, 4000};
 
         for (int i = 0; i < test_cases.length; i++)
         {
             // first create new table and run the tests.
-            row_count_based_tests(
-                conn, true, false, "APP", "TEST1", test_cases[i]);
+            deleteAllRows(
+                conn, true, "APP", table_name, test_cases[i]);
 
             // now rerun tests on existing table, which had all rows deleted
             // and truncated.
-            row_count_based_tests(
-                conn, false, true, "APP", "TEST1", test_cases[i]);
+            deleteAllRows(
+                conn, false, "APP", table_name, test_cases[i]);
+
+            checkPurgePhase(
+                conn, false, "APP", table_name, test_cases[i]);
+
+            executeQuery(conn, "drop table " + table_name, true);
         }
 
-        endTest(conn, "test1");
+        endTest(conn, test_name);
     }
 
+    /**
+     * Purge of uncommitted deletes should not do anything.
+     * <p>
+     * In the same transaction insert a number of rows, delete them all
+     * and then run the purge operation.  The purge operation should find
+     * the rows deleted but not do anything with them as the transaction
+     * has not committed.
+     **/
+    private void test2(
+    Connection  conn,
+    String      test_name,
+    String      table_name,
+    int         num_rows)
+        throws SQLException 
+    {
+        beginTest(conn, test_name);
+
+        createAndLoadTable(conn, true, table_name, num_rows);
+
+        // Purge pass on non-committed deleted rows should do nothing.  
+
+        int[] ret_before = getSpaceInfo(conn, "APP", table_name, false);
+
+        // Calling compress with just the "purge" pass option, no commit called.
+        callCompress(conn, "APP", table_name, true, false, false, false);
+        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, false);
+
+        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
+        {
+            log_wrong_count(
+                "Expected no alloc page change.", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
+        {
+            log_wrong_count(
+                "Expected no alloc page change.", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+
+        // since the table was just loaded, a defragment pass also should
+        // not find anything to do.
+        
+        // Calling compress with just the "defragment" option, no commit called.
+        callCompress(conn, "APP", table_name, false, true, false, false);
+        ret_after  = getSpaceInfo(conn, "APP", table_name, false);
+
+        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
+        {
+            log_wrong_count(
+                "Expected no alloc page change.", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
+        {
+            log_wrong_count(
+                "Expected no alloc page change.", 
+                table_name, num_rows, 
+                ret_before[SPACE_INFO_NUM_ALLOC], 
+                ret_after[SPACE_INFO_NUM_ALLOC],
+                ret_before, ret_after);
+        }
+
+        executeQuery(conn, "drop table " + table_name, true);
+
+        endTest(conn, test_name);
+    }
+
+
     public void testList(Connection conn)
         throws SQLException
     {
-        test1(conn);
+        test1(conn, "test1", "TEST1");
+        // test2(conn, "test2", "TEST2", 10000);
     }
 
     public static void main(String[] argv) 
@@ -433,7 +734,6 @@
 
    		ij.getPropertyArg(argv); 
         Connection conn = ij.startJBMS();
-        System.out.println("conn 2 from ij.startJBMS() = " + conn);
         conn.setAutoCommit(false);
 
         try

Modified: incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest_derby.properties
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest_derby.properties?rev=169492&r1=169491&r2=169492&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest_derby.properties (original)
+++ incubator/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/OnlineCompressTest_derby.properties Tue May 10 09:30:08 2005
@@ -1 +1,14 @@
 usedefaults=true
+
+#derby.storage.userLockTrace=true
+#derby.locks.deadlockTimeout=1
+#derby.locks.waitTimeout=3
+
+derby.infolog.append=true
+
+#derby.language.logStatementText=true
+
+#derby.debug.true=enableRowLocking,DeadlockTrace
+#derby.debug.true=DeadlockTrace
+#derby.debug.true=userLockStackTrace,DeadlockTrace
+



Mime
View raw message