db-derby-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bakk...@apache.org
Subject svn commit: r329187 [52/66] - in /db/derby/code/trunk: ./ frameworks/NetworkServer/ frameworks/NetworkServer/bin/ frameworks/embedded/bin/ java/build/ java/build/org/apache/derbyBuild/ java/build/org/apache/derbyBuild/eclipse/ java/build/org/apache/der...
Date Fri, 28 Oct 2005 12:52:21 GMT
Modified: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TableLockBasic.subsql
URL: http://svn.apache.org/viewcvs/db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TableLockBasic.subsql?rev=329187&r1=329186&r2=329187&view=diff
==============================================================================
--- db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TableLockBasic.subsql (original)
+++ db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TableLockBasic.subsql Fri Oct 28 04:51:50 2005
@@ -1,176 +1,176 @@
--- Very basic single user testing of table locking, verify that the right locks
--- are obtained for simple operations.  This test only looks at table and
--- row logical locks, it does not verify physical latches or lock ordering.
---
--- The basic methodology is:
---    start transaction
---    simple operation
---    print lock table which should match the master
---    end transation
--- 
-
-run resource 'createTestProcedures.subsql';
-run resource 'LockTableQuery.subsql';
-
-autocommit off;
-
-create table heap_only (a int);
-
-commit;
-
---------------------------------------------------------------------------------
--- Test insert into empty heap, should just get table lock 
---------------------------------------------------------------------------------
-insert into heap_only values (1);
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
-
---------------------------------------------------------------------------------
--- Test insert into heap with one row, just get table lock 
---------------------------------------------------------------------------------
-insert into heap_only values (2);
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test select from a heap, should get shared table lock.
---------------------------------------------------------------------------------
-select a from heap_only where a = 1;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test delete from a heap, should get exclusive table lock.
---------------------------------------------------------------------------------
-delete from heap_only where a = 1;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test update to heap, should get exclusive table lock.
---------------------------------------------------------------------------------
-update heap_only set a = 1000 where a = 2;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test drop of heap, should get exclusive table lock.
---------------------------------------------------------------------------------
-drop table heap_only;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
-call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', '4096');
-create table indexed_heap (a int, b varchar(1000));
-call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', NULL);
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
-call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', '4096');
-create index a_idx on indexed_heap (a, b);
-call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', NULL);
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test insert into indexed heap, should just get table lock 
---------------------------------------------------------------------------------
-insert into indexed_heap (a) values (1);
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test insert into indexed heap with one row, just get table lock 
---------------------------------------------------------------------------------
-insert into indexed_heap (a) values (2);
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test select from a indexed heap, should get shared table lock.
---------------------------------------------------------------------------------
-select a from indexed_heap where a = 1;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test delete from a indexed heap, should get exclusive table lock.
---------------------------------------------------------------------------------
-delete from indexed_heap where a = 1;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test update to indexed heap, should get exclusive table lock.
---------------------------------------------------------------------------------
-update indexed_heap set a = 1000 where a = 2;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test drop of indexed heap, should get exclusive table lock.
---------------------------------------------------------------------------------
-drop table indexed_heap;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test LOCK TABLE statement
---------------------------------------------------------------------------------
-create table t1(c1 int);
-commit;
-
-prepare p1 as 'lock table t1 in exclusive mode';
-execute p1;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
--- verify that statement gets recompiled correctly
-drop table t1;
-create table t1(c1 int);
-execute p1;
-commit;
-
-lock table t1 in share mode;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-drop table t1;
-commit;
-
--- verify that lock table not allowed in sys schema
-lock table sys.systables in exclusive mode;
-select * from lock_table order by tabname, type desc, mode, cnt, lockname;
-commit;
-
---------------------------------------------------------------------------------
--- Test RTS output when table locking configured
---------------------------------------------------------------------------------
-call SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1);
-maximumdisplaywidth 2000;
-create table rts(c1 int);
-insert into rts values 1;
-commit;
-select * from rts with cs;
-values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
-drop table rts;
-commit;
-
---------------------------------------------------------------------------------
--- Test DDL TABLE LOCK MODE
---------------------------------------------------------------------------------
-create table default_granularity(c1 int);
-create table row_granularity(c1 int);
-alter table row_granularity locksize row;
-create table table_granularity(c1 int);
-alter table table_granularity locksize table;
-select * from default_granularity with cs;
-values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
-select * from default_granularity with rr;
-values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
-select * from row_granularity with cs;
-values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
-select * from row_granularity with rr;
-values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
-select * from table_granularity with cs;
-values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
-select * from table_granularity with rr;
-values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
-rollback;
+-- Very basic single user testing of table locking, verify that the right locks
+-- are obtained for simple operations.  This test only looks at table and
+-- row logical locks, it does not verify physical latches or lock ordering.
+--
+-- The basic methodology is:
+--    start transaction
+--    simple operation
+--    print lock table which should match the master
+--    end transation
+-- 
+
+run resource 'createTestProcedures.subsql';
+run resource 'LockTableQuery.subsql';
+
+autocommit off;
+
+create table heap_only (a int);
+
+commit;
+
+--------------------------------------------------------------------------------
+-- Test insert into empty heap, should just get table lock 
+--------------------------------------------------------------------------------
+insert into heap_only values (1);
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+
+--------------------------------------------------------------------------------
+-- Test insert into heap with one row, just get table lock 
+--------------------------------------------------------------------------------
+insert into heap_only values (2);
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test select from a heap, should get shared table lock.
+--------------------------------------------------------------------------------
+select a from heap_only where a = 1;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test delete from a heap, should get exclusive table lock.
+--------------------------------------------------------------------------------
+delete from heap_only where a = 1;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test update to heap, should get exclusive table lock.
+--------------------------------------------------------------------------------
+update heap_only set a = 1000 where a = 2;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test drop of heap, should get exclusive table lock.
+--------------------------------------------------------------------------------
+drop table heap_only;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', '4096');
+create table indexed_heap (a int, b varchar(1000));
+call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', NULL);
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', '4096');
+create index a_idx on indexed_heap (a, b);
+call SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY('derby.storage.pageSize', NULL);
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test insert into indexed heap, should just get table lock 
+--------------------------------------------------------------------------------
+insert into indexed_heap (a) values (1);
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test insert into indexed heap with one row, just get table lock 
+--------------------------------------------------------------------------------
+insert into indexed_heap (a) values (2);
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test select from a indexed heap, should get shared table lock.
+--------------------------------------------------------------------------------
+select a from indexed_heap where a = 1;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test delete from a indexed heap, should get exclusive table lock.
+--------------------------------------------------------------------------------
+delete from indexed_heap where a = 1;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test update to indexed heap, should get exclusive table lock.
+--------------------------------------------------------------------------------
+update indexed_heap set a = 1000 where a = 2;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test drop of indexed heap, should get exclusive table lock.
+--------------------------------------------------------------------------------
+drop table indexed_heap;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test LOCK TABLE statement
+--------------------------------------------------------------------------------
+create table t1(c1 int);
+commit;
+
+prepare p1 as 'lock table t1 in exclusive mode';
+execute p1;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+-- verify that statement gets recompiled correctly
+drop table t1;
+create table t1(c1 int);
+execute p1;
+commit;
+
+lock table t1 in share mode;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+drop table t1;
+commit;
+
+-- verify that lock table not allowed in sys schema
+lock table sys.systables in exclusive mode;
+select * from lock_table order by tabname, type desc, mode, cnt, lockname;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test RTS output when table locking configured
+--------------------------------------------------------------------------------
+call SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1);
+maximumdisplaywidth 2000;
+create table rts(c1 int);
+insert into rts values 1;
+commit;
+select * from rts with cs;
+values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
+drop table rts;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test DDL TABLE LOCK MODE
+--------------------------------------------------------------------------------
+create table default_granularity(c1 int);
+create table row_granularity(c1 int);
+alter table row_granularity locksize row;
+create table table_granularity(c1 int);
+alter table table_granularity locksize table;
+select * from default_granularity with cs;
+values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
+select * from default_granularity with rr;
+values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
+select * from row_granularity with cs;
+values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
+select * from row_granularity with rr;
+values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
+select * from table_granularity with cs;
+values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
+select * from table_granularity with rr;
+values SYSCS_UTIL.SYSCS_GET_RUNTIMESTATISTICS();
+rollback;

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TableLockBasic.subsql
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDiskHashtable.java
URL: http://svn.apache.org/viewcvs/db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDiskHashtable.java?rev=329187&r1=329186&r2=329187&view=diff
==============================================================================
--- db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDiskHashtable.java (original)
+++ db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDiskHashtable.java Fri Oct 28 04:51:50 2005
@@ -1,432 +1,432 @@
-/*
-
-   Derby - Class org.apache.derbyTesting.functionTests.tests.store.TestDiskHashtable
-
-   Copyright 2005 The Apache Software Foundation or its licensors, as applicable.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
- */
-
-package org.apache.derbyTesting.functionTests.tests.store;
-
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-import java.util.BitSet;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Vector;
-
-import org.apache.derby.iapi.error.PublicAPI;
-import org.apache.derby.iapi.error.StandardException;
-import org.apache.derby.iapi.sql.conn.ConnectionUtil;
-import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
-import org.apache.derby.iapi.store.access.DiskHashtable;
-import org.apache.derby.iapi.store.access.KeyHasher;
-import org.apache.derby.iapi.store.access.TransactionController;
-import org.apache.derby.iapi.types.DataValueDescriptor;
-import org.apache.derby.iapi.types.Orderable;
-import org.apache.derby.iapi.types.SQLInteger;
-import org.apache.derby.iapi.types.SQLLongint;
-import org.apache.derby.iapi.types.SQLVarchar;
-import org.apache.derby.tools.ij;
-import org.apache.derbyTesting.functionTests.util.TestUtil;
-
-/**
- * This program tests the org.apache.derby.iapi.store.access.DiskHashtable class.
- * The unit test interface is not used because that is undocumented and very difficult to decipher.
- * Furthermore it is difficult to diagnose problems when using the unit test interface.
- *
- * Created: Wed Feb 09 15:44:12 2005
- *
- * @author <a href="mailto:klebanof@us.ibm.com">Jack Klebanoff</a>
- * @version 1.0
- */
-public class TestDiskHashtable 
-{
-    private TransactionController tc;
-    private int failed = 0;
-    
-    public static void main( String args[])
-    {
-        int failed = 1;
-
-		REPORT("Test DiskHashtable starting");
-        try
-        {
-			// use the ij utility to read the property file and
-			// make the initial connection.
-			ij.getPropertyArg(args);
-			Connection conn = ij.startJBMS();
-            Statement stmt = conn.createStatement();
-            stmt.execute("CREATE FUNCTION testDiskHashtable() returns INTEGER EXTERNAL NAME 'org.apache.derbyTesting.functionTests.tests.store.TestDiskHashtable.runTests' LANGUAGE JAVA PARAMETER STYLE JAVA");
-            ResultSet rs = stmt.executeQuery( "values( testDiskHashtable())");
-            if( rs.next())
-                failed = rs.getInt(1);
-            stmt.close();
-            conn.close();
-        }
-        catch( SQLException e)
-        {
-			TestUtil.dumpSQLExceptions( e);
-            failed = 1;
-        }
-        catch( Throwable t)
-        {
-			REPORT("FAIL -- unexpected exception:" + t.toString());
-            failed = 1;
-		}
-        REPORT( (failed == 0) ? "OK" : "FAILED");
-        System.exit( (failed == 0) ? 0 : 1);
-    }
-
-    private void REPORT_FAILURE(String msg)
-    {
-        failed = 1;
-        REPORT( msg);
-    }
-    
-    private static void REPORT(String msg)
-    {
-        System.out.println( msg);
-    }
-    
-    public static int runTests() throws SQLException
-    {
-        TestDiskHashtable tester = new TestDiskHashtable();
-        return tester.doIt();
-    }
-
-    private TestDiskHashtable() throws SQLException
-    {
-        LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
-        if( lcc == null)
-            throw new SQLException( "Cannot get the LCC");
-        tc = lcc.getTransactionExecute();
-    }
-
-    private int doIt() throws SQLException
-    {
-		try {
-
-
-            REPORT( "Starting single key, keep duplicates test");
-            testOneVariant( tc, false, singleKeyTemplate, singleKeyCols, singleKeyRows);
-            REPORT( "Starting single key, remove duplicates test");
-            testOneVariant( tc, true, singleKeyTemplate, singleKeyCols, singleKeyRows);
-            REPORT( "Starting multiple key, keep duplicates test");
-            testOneVariant( tc, false, multiKeyTemplate, multiKeyCols, multiKeyRows);
-            REPORT( "Starting multiple key, remove duplicates test");
-            testOneVariant( tc, true, multiKeyTemplate, multiKeyCols, multiKeyRows);
-
-			tc.commit();
-		}
-		catch (StandardException se)
-		{
-            throw PublicAPI.wrapStandardException( se);
-        }
-        return failed;
-    } // end of doIt
-
-    private static final DataValueDescriptor[] singleKeyTemplate = { new SQLInteger(), new SQLVarchar()};
-    private static final int[] singleKeyCols = {0};
-    private static final DataValueDescriptor[][] singleKeyRows =
-    {
-        {new SQLInteger(1), new SQLVarchar("abcd")},
-        {new SQLInteger(2), new SQLVarchar("abcd")},
-        {new SQLInteger(3), new SQLVarchar("e")},
-        {new SQLInteger(1), new SQLVarchar("zz")}
-    };
-
-    private static final DataValueDescriptor[] multiKeyTemplate = { new SQLLongint(), new SQLVarchar(), new SQLInteger()};
-    private static final int[] multiKeyCols = {1, 0};
-    private static final DataValueDescriptor[][] multiKeyRows =
-    {
-        {new SQLLongint(1), new SQLVarchar( "aa"), multiKeyTemplate[2].getNewNull()},
-        {new SQLLongint(2), new SQLVarchar( "aa"), new SQLInteger(1)},
-        {new SQLLongint(2), new SQLVarchar( "aa"), new SQLInteger(2)},
-        {new SQLLongint(2), new SQLVarchar( "b"), new SQLInteger(1)}
-    };
-
-    private static final int LOTS_OF_ROWS_COUNT = 50000;
-    
-    private void testOneVariant( TransactionController tc,
-                                 boolean removeDups,
-                                 DataValueDescriptor[] template,
-                                 int[] keyCols,
-                                 DataValueDescriptor[][] rows)
-        throws StandardException
-    {
-        DiskHashtable dht = new DiskHashtable(tc, template, keyCols, removeDups, false);
-        boolean[] isDuplicate = new boolean[ rows.length];
-        boolean[] found = new boolean[ rows.length];
-        HashMap simpleHash = new HashMap( rows.length);
-
-        testElements( removeDups, dht, keyCols, 0, rows, simpleHash, isDuplicate, found);
-
-        for( int i = 0; i < rows.length; i++)
-        {
-            Object key = KeyHasher.buildHashKey( rows[i], keyCols);
-            Vector al = (Vector) simpleHash.get( key);
-            isDuplicate[i] = (al != null);
-            if( al == null)
-            {
-                al = new Vector(4);
-                simpleHash.put( key, al);
-            }
-            if( (!removeDups) || !isDuplicate[i])
-                al.add( rows[i]);
-            
-            if( dht.put( key, rows[i]) != (removeDups ? (!isDuplicate[i]) : true))
-                REPORT_FAILURE( "  put returned wrong value on row " + i);
-
-            for( int j = 0; j <= i; j++)
-            {
-                key = KeyHasher.buildHashKey( rows[j], keyCols);
-                if( ! rowsEqual( dht.get( key), simpleHash.get( key)))
-                    REPORT_FAILURE( "  get returned wrong value on key " + j);
-            }
-
-            testElements( removeDups, dht, keyCols, i+1, rows, simpleHash, isDuplicate, found);
-        }
-        // Remove them
-        for( int i = 0; i < rows.length; i++)
-        {
-            Object key = KeyHasher.buildHashKey( rows[i], keyCols);
-            if( ! rowsEqual( dht.remove( key), simpleHash.get( key)))
-                REPORT_FAILURE( "  remove returned wrong value on key " + i);
-            simpleHash.remove( key);
-            if( dht.get( key) != null)
-                REPORT_FAILURE( "  remove did not delete key " + i);
-        }
-        testElements( removeDups, dht, keyCols, 0, rows, simpleHash, isDuplicate, found);
-
-        testLargeTable( dht, keyCols, rows[0]);
-        dht.close();
-    } // end of testOneVariant
-
-    private void testLargeTable( DiskHashtable dht,
-                                 int[] keyCols,
-                                 DataValueDescriptor[] aRow)
-        throws StandardException
-    {
-        // Add a lot of elements
-        // If there are two or more key columns then we will vary the first two key columns, using an approximately
-        // square matrix of integer key values. Because the hash generator is commutative key (i,j) hashes into the
-        // same bucket as key (j,i), testing the case where different keys hash into the same bucket.
-        int key1Count = (keyCols.length > 1) ? ((int) Math.round( Math.sqrt( (double) LOTS_OF_ROWS_COUNT))) : 1;
-        int key0Count = (LOTS_OF_ROWS_COUNT + key1Count - 1)/key1Count;
-
-        DataValueDescriptor[] row = new DataValueDescriptor[ aRow.length];
-        for( int i = 0; i < row.length; i++)
-            row[i] = aRow[i].getClone();
-        
-        for( int key0Idx = 0; key0Idx < key0Count; key0Idx++)
-        {
-            row[ keyCols[0]].setValue( key0Idx);
-            for( int key1Idx = 0; key1Idx < key1Count; key1Idx++)
-            {
-                if( keyCols.length > 1)
-                    row[ keyCols[1]].setValue( key1Idx);
-                Object key = KeyHasher.buildHashKey( row, keyCols);
-                if( ! dht.put( key, row))
-                {
-                    REPORT_FAILURE( "  put returned wrong value for key(" + key0Idx + "," + key1Idx + ")");
-                    key0Idx = key0Count;
-                    break;
-                }
-            }
-        }
-        for( int key0Idx = 0; key0Idx < key0Count; key0Idx++)
-        {
-            row[ keyCols[0]].setValue( key0Idx);
-            for( int key1Idx = 0; key1Idx < key1Count; key1Idx++)
-            {
-                if( keyCols.length > 1)
-                    row[ keyCols[1]].setValue( key1Idx);
-                Object key = KeyHasher.buildHashKey( row, keyCols);
-                if( ! rowsEqual( dht.get( key), row))
-                {
-                    REPORT_FAILURE( "  large table get returned wrong value for key(" + key0Idx + "," + key1Idx + ")");
-                    key0Idx = key0Count;
-                    break;
-                }
-            }
-        }
-        BitSet found = new BitSet(key0Count * key1Count);
-        Enumeration elements = dht.elements();
-        while( elements.hasMoreElements())
-        {
-            Object el = elements.nextElement();
-            if( ! (el instanceof DataValueDescriptor[]))
-            {
-                REPORT_FAILURE( "  large table enumeration returned wrong element type");
-                break;
-            }
-            DataValueDescriptor[] fetchedRow = (DataValueDescriptor[]) el;
-            
-            int i = fetchedRow[ keyCols[0]].getInt() * key1Count;
-            if( keyCols.length > 1)
-                i += fetchedRow[ keyCols[1]].getInt();
-            if( i >= key0Count * key1Count)
-            {
-                REPORT_FAILURE( "  large table enumeration returned invalid element");
-                break;
-            }
-                
-            if( found.get(i))
-            {
-                REPORT_FAILURE( "  large table enumeration returned same element twice");
-                break;
-            }
-            found.set(i);
-        }
-        for( int i = key0Count * key1Count - 1; i >= 0; i--)
-        {
-            if( !found.get(i))
-            {
-                REPORT_FAILURE( "  large table enumeration missed at least one element");
-                break;
-            }
-        }
-    } // end of testLargeTable
-
-    private void testElements( boolean removeDups,
-                               DiskHashtable dht,
-                               int[] keyCols,
-                               int rowCount,
-                               DataValueDescriptor[][] rows,
-                               HashMap simpleHash,
-                               boolean[] isDuplicate,
-                               boolean[] found)
-        throws StandardException
-    {
-        for( int i = 0; i < rowCount; i++)
-            found[i] = false;
-        
-        for( Enumeration e = dht.elements(); e.hasMoreElements();)
-        {
-            Object el = e.nextElement();
-            if( el == null)
-            {
-                REPORT_FAILURE( "  table enumeration returned a null element");
-                return;
-            }
-            if( el instanceof DataValueDescriptor[])
-                checkElement( (DataValueDescriptor[]) el, rowCount, rows, found);
-            else if( el instanceof Vector)
-            {
-                Vector v = (Vector) el;
-                for( int i = 0; i < v.size(); i++)
-                    checkElement( (DataValueDescriptor[]) v.get(i), rowCount, rows, found);
-            }
-            else if( el == null)
-            {
-                REPORT_FAILURE( "  table enumeration returned an incorrect element type");
-                return;
-            }
-        }
-        for( int i = 0; i < rowCount; i++)
-        {
-            if( (removeDups && isDuplicate[i]))
-            {
-                if( found[i])
-                {
-                    REPORT_FAILURE( "  table enumeration did not remove duplicates");
-                    return;
-                }
-            }
-            else if( ! found[i])
-            {
-                REPORT_FAILURE( "  table enumeration missed at least one element");
-                return;
-            }
-        }
-    } // end of testElements
-
-    private void checkElement( DataValueDescriptor[] fetchedRow,
-                               int rowCount,
-                               DataValueDescriptor[][] rows,
-                               boolean[] found)
-        throws StandardException
-    {
-        for( int i = 0; i < rowCount; i++)
-        {
-            if( rowsEqual( fetchedRow, rows[i]))
-            {
-                if( found[i])
-                {
-                    REPORT_FAILURE( "  table enumeration returned the same element twice");
-                    return;
-                }
-                found[i] = true;
-                return;
-            }
-        }
-        REPORT_FAILURE( "  table enumeration returned an incorrect element");
-    } // end of checkElement
-
-    private boolean rowsEqual( Object r1, Object r2)
-        throws StandardException
-    {
-        if( r1 == null)
-            return r2 == null;
-
-        if( r1 instanceof DataValueDescriptor[])
-        {
-            DataValueDescriptor[] row1 = (DataValueDescriptor[]) r1;
-            DataValueDescriptor[] row2;
-            
-            if( r2 instanceof Vector)
-            {
-                Vector v2 = (Vector) r2;
-                if( v2.size() != 1)
-                    return false;
-                row2 = (DataValueDescriptor[]) v2.elementAt(0);
-            }
-            else if( r2 instanceof DataValueDescriptor[])
-                row2 = (DataValueDescriptor[]) r2;
-            else
-                return false;
-            
-            if( row1.length != row2.length)
-                return false;
-            for( int i = 0; i < row1.length; i++)
-            {
-                if( ! row1[i].compare( Orderable.ORDER_OP_EQUALS, row2[i], true, true))
-                    return false;
-            }
-            return true;
-        }
-        if( r1 instanceof Vector)
-        {
-            if( !(r2 instanceof Vector))
-                return false;
-            Vector v1 = (Vector) r1;
-            Vector v2 = (Vector) r2;
-            if( v1.size() != v2.size())
-                return false;
-            for( int i = v1.size() - 1; i >= 0; i--)
-            {
-                if( ! rowsEqual( v1.elementAt( i), v2.elementAt(i)))
-                    return false;
-            }
-            return true;
-        }
-        // What is it then?
-        return r1.equals( r2);
-    } // end of rowsEqual
-}
+/*
+
+   Derby - Class org.apache.derbyTesting.functionTests.tests.store.TestDiskHashtable
+
+   Copyright 2005 The Apache Software Foundation or its licensors, as applicable.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+ */
+
+package org.apache.derbyTesting.functionTests.tests.store;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import java.util.BitSet;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Vector;
+
+import org.apache.derby.iapi.error.PublicAPI;
+import org.apache.derby.iapi.error.StandardException;
+import org.apache.derby.iapi.sql.conn.ConnectionUtil;
+import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
+import org.apache.derby.iapi.store.access.DiskHashtable;
+import org.apache.derby.iapi.store.access.KeyHasher;
+import org.apache.derby.iapi.store.access.TransactionController;
+import org.apache.derby.iapi.types.DataValueDescriptor;
+import org.apache.derby.iapi.types.Orderable;
+import org.apache.derby.iapi.types.SQLInteger;
+import org.apache.derby.iapi.types.SQLLongint;
+import org.apache.derby.iapi.types.SQLVarchar;
+import org.apache.derby.tools.ij;
+import org.apache.derbyTesting.functionTests.util.TestUtil;
+
+/**
+ * This program tests the org.apache.derby.iapi.store.access.DiskHashtable class.
+ * The unit test interface is not used because that is undocumented and very difficult to decipher.
+ * Furthermore it is difficult to diagnose problems when using the unit test interface.
+ *
+ * Created: Wed Feb 09 15:44:12 2005
+ *
+ * @author <a href="mailto:klebanof@us.ibm.com">Jack Klebanoff</a>
+ * @version 1.0
+ */
+public class TestDiskHashtable 
+{
+    private TransactionController tc;
+    private int failed = 0;
+    
+    public static void main( String args[])
+    {
+        int failed = 1;
+
+		REPORT("Test DiskHashtable starting");
+        try
+        {
+			// use the ij utility to read the property file and
+			// make the initial connection.
+			ij.getPropertyArg(args);
+			Connection conn = ij.startJBMS();
+            Statement stmt = conn.createStatement();
+            stmt.execute("CREATE FUNCTION testDiskHashtable() returns INTEGER EXTERNAL NAME 'org.apache.derbyTesting.functionTests.tests.store.TestDiskHashtable.runTests' LANGUAGE JAVA PARAMETER STYLE JAVA");
+            ResultSet rs = stmt.executeQuery( "values( testDiskHashtable())");
+            if( rs.next())
+                failed = rs.getInt(1);
+            stmt.close();
+            conn.close();
+        }
+        catch( SQLException e)
+        {
+			TestUtil.dumpSQLExceptions( e);
+            failed = 1;
+        }
+        catch( Throwable t)
+        {
+			REPORT("FAIL -- unexpected exception:" + t.toString());
+            failed = 1;
+		}
+        REPORT( (failed == 0) ? "OK" : "FAILED");
+        System.exit( (failed == 0) ? 0 : 1);
+    }
+
+    private void REPORT_FAILURE(String msg)
+    {
+        failed = 1;
+        REPORT( msg);
+    }
+    
+    private static void REPORT(String msg)
+    {
+        System.out.println( msg);
+    }
+    
+    public static int runTests() throws SQLException
+    {
+        TestDiskHashtable tester = new TestDiskHashtable();
+        return tester.doIt();
+    }
+
+    private TestDiskHashtable() throws SQLException
+    {
+        LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
+        if( lcc == null)
+            throw new SQLException( "Cannot get the LCC");
+        tc = lcc.getTransactionExecute();
+    }
+
+    private int doIt() throws SQLException
+    {
+		try {
+
+
+            REPORT( "Starting single key, keep duplicates test");
+            testOneVariant( tc, false, singleKeyTemplate, singleKeyCols, singleKeyRows);
+            REPORT( "Starting single key, remove duplicates test");
+            testOneVariant( tc, true, singleKeyTemplate, singleKeyCols, singleKeyRows);
+            REPORT( "Starting multiple key, keep duplicates test");
+            testOneVariant( tc, false, multiKeyTemplate, multiKeyCols, multiKeyRows);
+            REPORT( "Starting multiple key, remove duplicates test");
+            testOneVariant( tc, true, multiKeyTemplate, multiKeyCols, multiKeyRows);
+
+			tc.commit();
+		}
+		catch (StandardException se)
+		{
+            throw PublicAPI.wrapStandardException( se);
+        }
+        return failed;
+    } // end of doIt
+
+    private static final DataValueDescriptor[] singleKeyTemplate = { new SQLInteger(), new SQLVarchar()};
+    private static final int[] singleKeyCols = {0};
+    private static final DataValueDescriptor[][] singleKeyRows =
+    {
+        {new SQLInteger(1), new SQLVarchar("abcd")},
+        {new SQLInteger(2), new SQLVarchar("abcd")},
+        {new SQLInteger(3), new SQLVarchar("e")},
+        {new SQLInteger(1), new SQLVarchar("zz")}
+    };
+
+    private static final DataValueDescriptor[] multiKeyTemplate = { new SQLLongint(), new SQLVarchar(), new SQLInteger()};
+    private static final int[] multiKeyCols = {1, 0};
+    private static final DataValueDescriptor[][] multiKeyRows =
+    {
+        {new SQLLongint(1), new SQLVarchar( "aa"), multiKeyTemplate[2].getNewNull()},
+        {new SQLLongint(2), new SQLVarchar( "aa"), new SQLInteger(1)},
+        {new SQLLongint(2), new SQLVarchar( "aa"), new SQLInteger(2)},
+        {new SQLLongint(2), new SQLVarchar( "b"), new SQLInteger(1)}
+    };
+
+    private static final int LOTS_OF_ROWS_COUNT = 50000;
+    
+    private void testOneVariant( TransactionController tc,
+                                 boolean removeDups,
+                                 DataValueDescriptor[] template,
+                                 int[] keyCols,
+                                 DataValueDescriptor[][] rows)
+        throws StandardException
+    {
+        DiskHashtable dht = new DiskHashtable(tc, template, keyCols, removeDups, false);
+        boolean[] isDuplicate = new boolean[ rows.length];
+        boolean[] found = new boolean[ rows.length];
+        HashMap simpleHash = new HashMap( rows.length);
+
+        testElements( removeDups, dht, keyCols, 0, rows, simpleHash, isDuplicate, found);
+
+        for( int i = 0; i < rows.length; i++)
+        {
+            Object key = KeyHasher.buildHashKey( rows[i], keyCols);
+            Vector al = (Vector) simpleHash.get( key);
+            isDuplicate[i] = (al != null);
+            if( al == null)
+            {
+                al = new Vector(4);
+                simpleHash.put( key, al);
+            }
+            if( (!removeDups) || !isDuplicate[i])
+                al.add( rows[i]);
+            
+            if( dht.put( key, rows[i]) != (removeDups ? (!isDuplicate[i]) : true))
+                REPORT_FAILURE( "  put returned wrong value on row " + i);
+
+            for( int j = 0; j <= i; j++)
+            {
+                key = KeyHasher.buildHashKey( rows[j], keyCols);
+                if( ! rowsEqual( dht.get( key), simpleHash.get( key)))
+                    REPORT_FAILURE( "  get returned wrong value on key " + j);
+            }
+
+            testElements( removeDups, dht, keyCols, i+1, rows, simpleHash, isDuplicate, found);
+        }
+        // Remove them
+        for( int i = 0; i < rows.length; i++)
+        {
+            Object key = KeyHasher.buildHashKey( rows[i], keyCols);
+            if( ! rowsEqual( dht.remove( key), simpleHash.get( key)))
+                REPORT_FAILURE( "  remove returned wrong value on key " + i);
+            simpleHash.remove( key);
+            if( dht.get( key) != null)
+                REPORT_FAILURE( "  remove did not delete key " + i);
+        }
+        testElements( removeDups, dht, keyCols, 0, rows, simpleHash, isDuplicate, found);
+
+        testLargeTable( dht, keyCols, rows[0]);
+        dht.close();
+    } // end of testOneVariant
+
+    private void testLargeTable( DiskHashtable dht,
+                                 int[] keyCols,
+                                 DataValueDescriptor[] aRow)
+        throws StandardException
+    {
+        // Add a lot of elements
+        // If there are two or more key columns then we will vary the first two key columns, using an approximately
+        // square matrix of integer key values. Because the hash generator is commutative key (i,j) hashes into the
+        // same bucket as key (j,i), testing the case where different keys hash into the same bucket.
+        int key1Count = (keyCols.length > 1) ? ((int) Math.round( Math.sqrt( (double) LOTS_OF_ROWS_COUNT))) : 1;
+        int key0Count = (LOTS_OF_ROWS_COUNT + key1Count - 1)/key1Count;
+
+        DataValueDescriptor[] row = new DataValueDescriptor[ aRow.length];
+        for( int i = 0; i < row.length; i++)
+            row[i] = aRow[i].getClone();
+        
+        for( int key0Idx = 0; key0Idx < key0Count; key0Idx++)
+        {
+            row[ keyCols[0]].setValue( key0Idx);
+            for( int key1Idx = 0; key1Idx < key1Count; key1Idx++)
+            {
+                if( keyCols.length > 1)
+                    row[ keyCols[1]].setValue( key1Idx);
+                Object key = KeyHasher.buildHashKey( row, keyCols);
+                if( ! dht.put( key, row))
+                {
+                    REPORT_FAILURE( "  put returned wrong value for key(" + key0Idx + "," + key1Idx + ")");
+                    key0Idx = key0Count;
+                    break;
+                }
+            }
+        }
+        for( int key0Idx = 0; key0Idx < key0Count; key0Idx++)
+        {
+            row[ keyCols[0]].setValue( key0Idx);
+            for( int key1Idx = 0; key1Idx < key1Count; key1Idx++)
+            {
+                if( keyCols.length > 1)
+                    row[ keyCols[1]].setValue( key1Idx);
+                Object key = KeyHasher.buildHashKey( row, keyCols);
+                if( ! rowsEqual( dht.get( key), row))
+                {
+                    REPORT_FAILURE( "  large table get returned wrong value for key(" + key0Idx + "," + key1Idx + ")");
+                    key0Idx = key0Count;
+                    break;
+                }
+            }
+        }
+        BitSet found = new BitSet(key0Count * key1Count);
+        Enumeration elements = dht.elements();
+        while( elements.hasMoreElements())
+        {
+            Object el = elements.nextElement();
+            if( ! (el instanceof DataValueDescriptor[]))
+            {
+                REPORT_FAILURE( "  large table enumeration returned wrong element type");
+                break;
+            }
+            DataValueDescriptor[] fetchedRow = (DataValueDescriptor[]) el;
+            
+            int i = fetchedRow[ keyCols[0]].getInt() * key1Count;
+            if( keyCols.length > 1)
+                i += fetchedRow[ keyCols[1]].getInt();
+            if( i >= key0Count * key1Count)
+            {
+                REPORT_FAILURE( "  large table enumeration returned invalid element");
+                break;
+            }
+                
+            if( found.get(i))
+            {
+                REPORT_FAILURE( "  large table enumeration returned same element twice");
+                break;
+            }
+            found.set(i);
+        }
+        for( int i = key0Count * key1Count - 1; i >= 0; i--)
+        {
+            if( !found.get(i))
+            {
+                REPORT_FAILURE( "  large table enumeration missed at least one element");
+                break;
+            }
+        }
+    } // end of testLargeTable
+
+    private void testElements( boolean removeDups,
+                               DiskHashtable dht,
+                               int[] keyCols,
+                               int rowCount,
+                               DataValueDescriptor[][] rows,
+                               HashMap simpleHash,
+                               boolean[] isDuplicate,
+                               boolean[] found)
+        throws StandardException
+    {
+        for( int i = 0; i < rowCount; i++)
+            found[i] = false;
+        
+        for( Enumeration e = dht.elements(); e.hasMoreElements();)
+        {
+            Object el = e.nextElement();
+            if( el == null)
+            {
+                REPORT_FAILURE( "  table enumeration returned a null element");
+                return;
+            }
+            if( el instanceof DataValueDescriptor[])
+                checkElement( (DataValueDescriptor[]) el, rowCount, rows, found);
+            else if( el instanceof Vector)
+            {
+                Vector v = (Vector) el;
+                for( int i = 0; i < v.size(); i++)
+                    checkElement( (DataValueDescriptor[]) v.get(i), rowCount, rows, found);
+            }
+            else if( el == null)
+            {
+                REPORT_FAILURE( "  table enumeration returned an incorrect element type");
+                return;
+            }
+        }
+        for( int i = 0; i < rowCount; i++)
+        {
+            if( (removeDups && isDuplicate[i]))
+            {
+                if( found[i])
+                {
+                    REPORT_FAILURE( "  table enumeration did not remove duplicates");
+                    return;
+                }
+            }
+            else if( ! found[i])
+            {
+                REPORT_FAILURE( "  table enumeration missed at least one element");
+                return;
+            }
+        }
+    } // end of testElements
+
+    private void checkElement( DataValueDescriptor[] fetchedRow,
+                               int rowCount,
+                               DataValueDescriptor[][] rows,
+                               boolean[] found)
+        throws StandardException
+    {
+        for( int i = 0; i < rowCount; i++)
+        {
+            if( rowsEqual( fetchedRow, rows[i]))
+            {
+                if( found[i])
+                {
+                    REPORT_FAILURE( "  table enumeration returned the same element twice");
+                    return;
+                }
+                found[i] = true;
+                return;
+            }
+        }
+        REPORT_FAILURE( "  table enumeration returned an incorrect element");
+    } // end of checkElement
+
+    private boolean rowsEqual( Object r1, Object r2)
+        throws StandardException
+    {
+        if( r1 == null)
+            return r2 == null;
+
+        if( r1 instanceof DataValueDescriptor[])
+        {
+            DataValueDescriptor[] row1 = (DataValueDescriptor[]) r1;
+            DataValueDescriptor[] row2;
+            
+            if( r2 instanceof Vector)
+            {
+                Vector v2 = (Vector) r2;
+                if( v2.size() != 1)
+                    return false;
+                row2 = (DataValueDescriptor[]) v2.elementAt(0);
+            }
+            else if( r2 instanceof DataValueDescriptor[])
+                row2 = (DataValueDescriptor[]) r2;
+            else
+                return false;
+            
+            if( row1.length != row2.length)
+                return false;
+            for( int i = 0; i < row1.length; i++)
+            {
+                if( ! row1[i].compare( Orderable.ORDER_OP_EQUALS, row2[i], true, true))
+                    return false;
+            }
+            return true;
+        }
+        if( r1 instanceof Vector)
+        {
+            if( !(r2 instanceof Vector))
+                return false;
+            Vector v1 = (Vector) r1;
+            Vector v2 = (Vector) r2;
+            if( v1.size() != v2.size())
+                return false;
+            for( int i = v1.size() - 1; i >= 0; i--)
+            {
+                if( ! rowsEqual( v1.elementAt( i), v2.elementAt(i)))
+                    return false;
+            }
+            return true;
+        }
+        // What is it then?
+        return r1.equals( r2);
+    } // end of rowsEqual
+}

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDiskHashtable.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDurabilityProperty.java
URL: http://svn.apache.org/viewcvs/db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDurabilityProperty.java?rev=329187&r1=329186&r2=329187&view=diff
==============================================================================
--- db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDurabilityProperty.java (original)
+++ db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDurabilityProperty.java Fri Oct 28 04:51:50 2005
@@ -1,343 +1,343 @@
-/*
- 
- Derby - Class org.apache.derbyTesting.functionTests.tests.store.TestNoSyncs
- 
- Copyright 2002, 2005 The Apache Software Foundation or its licensors, as applicable.
- 
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- 
- http://www.apache.org/licenses/LICENSE-2.0
- 
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- 
- */
-
-package org.apache.derbyTesting.functionTests.tests.store;
-
-import java.sql.Connection;
-import java.sql.Statement;
-import java.sql.PreparedStatement;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.io.*;
-
-/**
- * This program tests the system when the derby.system.durability property is
- * set to 'test'. 
- * <BR>
- * When the derby.system.durability is set to 'test', the system will not do 
- * any sync to the
- * disk and the recovery system will not work property. It is provided for
- * performance reasons and should ideally only be used when there is no
- * requirement for the database to recover.
- * <p>
- * When set, the system will not do any syncs, the cases namely - no sync of the
- * log file at each commit - no sync of the log file before data page is forced
- * to disk - no sync of page allocation when file is grown - no sync of data
- * writes during checkpoint
- * <p>
- * That means, when this property is set to 'test',
- *  - a commit no longer guarantees that the transaction's modification will 
- *    survive a system crash or
- * JVM termination 
- * - the database may not recover successfully upon restart 
- * - a near full disk at runtime may cause unexpected errors
- * - database may be in an inconsistent state
- * 
- * This program tests for 
- * 1. setting the derby.system.durability=test is actually not
- *    doing the syncs by timing inserts
- * 2. check if a warning message exists in derby.log 
- * 3. read log.ctrl file and check if the flag is set or not
- * 4. check if the log.ctrl file flag is not overwritten for the case when 
- * database booted with derby.system.durability=test set, then shutdown
- * and database booted without derby.system.durability=test
- * 
- * @author Sunitha Kambhampati 
- * @version 1.0
- */
-public class TestDurabilityProperty {
-    public static void main(String[] args) {
-        try {
-            // Test 1: check if derby.system.durability=test
-            // mode is not doing syncs 
-            testNoSyncs(args);
-            
-            String derbyHome = System.getProperty("derby.system.home");
-            // Test 2
-            // Check if derby.log has the warning message
-            report("Is warning message about derby.system.durability=test present in derby.log ="
-                    + isMessageInDerbyLog(derbyHome));
-            // Test 3
-            // Check if marker is correctly written out to database
-            markerInControlFile(derbyHome);
-            
-            // Test 4
-            // shutdown database and boot database afresh without 
-            // derby.system.durability set to test. In this case the derby.log 
-            // and the log control file should still have the marker that this 
-            // mode was once used to boot database.
-            report(
-                "2. shutdown database and reboot database without " +
-                "derby.system.durability=test and test for marker in log.ctrl file");
-            markerNotOverwritten(derbyHome);
-
-        } catch (Throwable e) {
-            report("FAIL -- unexpected exception: " + e);
-            e.printStackTrace();
-        }
-
-    }
-
-    /**
-     * time inserts 
-     * 
-     * @param mode
-     *            value for derby.system.durability property
-     * @param create
-     *            should table be created or not
-     * @param autoCommit
-     *            whether inserts should happen in autocommit mode or not
-     * @return time taken to do inserts
-     * @throws Exception
-     */
-    public static long timeTakenToInsert(String mode, boolean create,
-            boolean autoCommit) throws Exception {
-        System.setProperty("derby.system.durability", mode);
-        Connection conn = org.apache.derby.tools.ij.startJBMS();
-
-        if (create) {
-            Statement s = conn.createStatement();
-            s.execute("create table t1 (c1 int, c2 int)");
-            s.close();
-        }
-
-        long timeTaken = doInserts(conn, autoCommit);
-
-        try {
-            conn.close();
-            DriverManager.getConnection("jdbc:derby:;shutdown=true");
-        } catch (SQLException sqle) {
-            if ("XJ015".equals(sqle.getSQLState())) {
-            }// ok database shutdown
-            else {
-                report(sqle.getSQLState());
-                report("ERROR! during shutdown");
-                sqle.printStackTrace();
-            }
-        }
-
-        return timeTaken;
-    }
-
-
-    /**
-     * Note doing inserts in autocommit mode is probably the worst case scenario
-     * in terms of performance as each commit will involve a flush/sync to disk
-     * but in case of the derby.system.durability=test mode, the syncs dont 
-     * happen.
-     */
-    public static long doInserts(Connection conn,boolean autoCommit) throws Exception {
-        PreparedStatement ps = conn
-                .prepareStatement("insert into t1 values(?,?)");
-        conn.setAutoCommit(autoCommit);
-        long count = 0;
-
-        long start = System.currentTimeMillis();
-
-        for (int i = 0; i < 500; i++) {
-            ps.setInt(1, i);
-            ps.setInt(2, i);
-            count += ps.executeUpdate();
-        }
-
-        if (!autoCommit)
-            conn.commit();
-        long end = System.currentTimeMillis();
-        if (count < 500)
-            report(" FAIL!! all rows didnt get inserted ?");
-        
-        return (end - start);
-    }
-
-    /**
-     * When derby.system.durability is set, a warning message is written out to
-     * derby.log indicating that the property is set and that it does not
-     * guarantee recoverability This test tests if a message is written out to
-     * derby.log or not
-     */
-    public static boolean isMessageInDerbyLog(String derbyHome) throws Exception {
-        BufferedReader reader = null;
-        File derbylog = null;
-        try {
-            derbylog = new File(derbyHome, "derby.log");
-            reader = new BufferedReader(new FileReader(derbylog));
-
-            String line = null;
-            while ((line = reader.readLine()) != null) {
-                if (line.indexOf("derby.system.durability=test") != -1)
-                    return true;
-
-            }
-            return false;
-        } finally {
-            if (reader != null) {
-                reader.close();
-            }
-            derbylog = null;
-        }
-    }
-
-    /**
-     * if database is booted with derby.system.durability=test, 
-     * a marker is written out into log control
-     * file to recognize that the database was previously booted in this mode
-     * Test if the marker byte is set correctly or not. See comments in
-     * org.apache.derby.impl.store.log.LogToFile for IS_DURABILITY_TESTMODE_NO_SYNC_FLAG
-     */
-    public static void markerInControlFile(String derbyHome) throws Exception {
-        RandomAccessFile controlFile = null;
-        try {
-            int testModeNoSyncMarkerPosition = 28;
-            byte testModeNoSyncMarker = 0x2;
-            controlFile = new RandomAccessFile(derbyHome
-                    + "/wombat/log/log.ctrl", "r");
-            controlFile.seek(testModeNoSyncMarkerPosition);
-            report("log.ctrl file has durability testMode no sync marker value = "
-                    + ((controlFile.readByte() & testModeNoSyncMarker) != 0) );
-        } finally {
-            if (controlFile != null)
-                controlFile.close();
-
-        }
-    }
-
-    /**
-     * Test for case when database is booted without derby.system.durability=test
-     * but previously has been booted with the derby.system.durability=test. In 
-     * this scenario,the log control file should still have the marker to say
-     * that this mode was set previously, and derby.log must also have a warning
-     * message
-     * @param derbyHome value of derby.system.home where the database is
-     * @throws Exception
-     */
-    public static void markerNotOverwritten(String derbyHome) throws Exception
-    {
-        // shutdown database
-        Connection conn = null;
-        // unset property
-        System.setProperty("derby.system.durability","");
-        conn = org.apache.derby.tools.ij.startJBMS();
-        conn.close();
-        markerInControlFile(derbyHome);
-        report("Is warning message about derby.system.durability=test present in derby.log ="
-                + isMessageInDerbyLog(derbyHome));
-    }
-    
-    /**
-     * print message
-     * @param msg to print out 
-     */
-    public static void report(String msg) {
-        System.out.println(msg);
-    }
-
-    /**
-     * Test if derby.system.durability=test property is broken or not. We time
-     * inserts for 500 repeated inserts and make some approximate estimations on
-     * how fast it should be. Since it is a timing based test, there might be
-     * cases of a really really slow machine in some weird cases where this test
-     * may have diffs in this part of the test.
-     * 
-     * So basically to determine if something is wrong, the following is done
-     * to try best to eliminate issues with slow machines
-     * 1)if inserts with autocommit on is fast enough (an estimated
-     * bound)
-     * 2)if not, then since no syncs happen , check if time taken to do
-     * inserts for autocommit on and off are in proximity range 
-     * 3)if 1 and 2 is not satisfied, then check time taken without this mode set
-     * and with this mode set.
-     * If they are in proximity range something could be wrong. It
-     * might be good to check the machine configuration and environment when
-     * test was running <BR>
-     * Also note, although it would seem like a solution would be to bump the
-     * estimated bound to a high limit, this might not help since on a really
-     * good disk the inserts doing syncs might well be done within the bound and
-     * thus would not be possible to know if this mode was possibly broken.
-     *  
-     */
-    public static void testNoSyncs(String[] args) throws Exception {
-        boolean debug = false;  // if set, prints out useful info when debugging test
-        
-        report("1. With derby.system.durability=test,"
-                + "Test to see if syncs are not happening ");
-        // use the ij utility to read the property file and
-        // make the initial connection.
-        org.apache.derby.tools.ij.getPropertyArg(args);
-
-        boolean create = true;
-
-        // Note we time inserts in normal all syncs case first even
-        // though we may not require it if the inserts finish fast enough
-        // But timing them here because once database is booted with
-        // derby.system.durability=test there are no guarantees on consistency
-        // of database so dont want to mess up numbers for the all syncs case
-
-        // derby.system.durability is not test so it will default to
-        // normal mode and autocommit=true
-        long timeCommitOn = timeTakenToInsert("", create, true);
-        String derbyHome = System.getProperty("derby.system.home");
-        if (isMessageInDerbyLog(derbyHome))
-            report("ERROR! System should not have been booted with"
-                    + "derby.system.durability=test mode here");
-        create = false;
-        // derby.system.durability=test and autocommit=true
-        long timeWithTestModeCommitOn = timeTakenToInsert("test", create, true);
-        // derby.system.durability=test and autocommit=false
-        long timeWithTestModeCommitOff = timeTakenToInsert("test", create, false);
-      
-        if (debug) {
-            report("timeCommitOn = " + timeCommitOn);
-            report("timeWithTestModeCommitOn = " + timeWithTestModeCommitOn);
-            report("timeWithTestModeCommitOff = " + timeWithTestModeCommitOff);
-        }
-
-        // an approximation on the upper bound for time taken to do
-        // inserts in autocommit mode with derby.system.durability=test mode
-        long upperBound = 3000;
-
-        // if it takes a lot of time to do the inserts then do extra checks
-        // to determine if derby.system.durability=test mode is broken or not
-        // because we cant be sure if inserts just took a long time
-        // because of a really slow machine
-        if (timeWithTestModeCommitOn > upperBound) {
-
-            long proximityRange = 1000;
-
-            // in derby.system.durability=test autocommit on or off should
-            // be in same range since syncs are not happening
-            if (Math.abs(timeWithTestModeCommitOn - timeWithTestModeCommitOff) > proximityRange) {
-                // another approximation here (1.5 times of with testmode set)
-                if (timeWithTestModeCommitOn > timeCommitOn
-                        || (timeCommitOn < (1.5 * timeWithTestModeCommitOn))) {
-                    report("FAIL -- derby.system.durability=test mode seems to be broken.");
-                    report("-- In this mode one would expect that inserts with autocommit off and on "
-                            + "would be in the same range as syncs are not happening but the difference "
-                            + "here seems to be more than the approximate estimated range.");
-                    report("-- Also comparing the time taken to do the inserts without this" +
-                            " property set seems to be in the same"
-                            + " range as with this property set.");
-                    report("-- Please note this test times inserts and approximate estimates were " +
-                            "considered to report this observation.");
-                }
-            }
-        }
-
-    }
-}
+/*
+ 
+ Derby - Class org.apache.derbyTesting.functionTests.tests.store.TestNoSyncs
+ 
+ Copyright 2002, 2005 The Apache Software Foundation or its licensors, as applicable.
+ 
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ 
+ http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ 
+ */
+
+package org.apache.derbyTesting.functionTests.tests.store;
+
+import java.sql.Connection;
+import java.sql.Statement;
+import java.sql.PreparedStatement;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.io.*;
+
+/**
+ * This program tests the system when the derby.system.durability property is
+ * set to 'test'. 
+ * <BR>
+ * When the derby.system.durability is set to 'test', the system will not do 
+ * any sync to the
+ * disk and the recovery system will not work property. It is provided for
+ * performance reasons and should ideally only be used when there is no
+ * requirement for the database to recover.
+ * <p>
+ * When set, the system will not do any syncs, the cases namely - no sync of the
+ * log file at each commit - no sync of the log file before data page is forced
+ * to disk - no sync of page allocation when file is grown - no sync of data
+ * writes during checkpoint
+ * <p>
+ * That means, when this property is set to 'test',
+ *  - a commit no longer guarantees that the transaction's modification will 
+ *    survive a system crash or
+ * JVM termination 
+ * - the database may not recover successfully upon restart 
+ * - a near full disk at runtime may cause unexpected errors
+ * - database may be in an inconsistent state
+ * 
+ * This program tests for 
+ * 1. setting the derby.system.durability=test is actually not
+ *    doing the syncs by timing inserts
+ * 2. check if a warning message exists in derby.log 
+ * 3. read log.ctrl file and check if the flag is set or not
+ * 4. check if the log.ctrl file flag is not overwritten for the case when 
+ * database booted with derby.system.durability=test set, then shutdown
+ * and database booted without derby.system.durability=test
+ * 
+ * @author Sunitha Kambhampati 
+ * @version 1.0
+ */
+public class TestDurabilityProperty {
+    public static void main(String[] args) {
+        try {
+            // Test 1: check if derby.system.durability=test
+            // mode is not doing syncs 
+            testNoSyncs(args);
+            
+            String derbyHome = System.getProperty("derby.system.home");
+            // Test 2
+            // Check if derby.log has the warning message
+            report("Is warning message about derby.system.durability=test present in derby.log ="
+                    + isMessageInDerbyLog(derbyHome));
+            // Test 3
+            // Check if marker is correctly written out to database
+            markerInControlFile(derbyHome);
+            
+            // Test 4
+            // shutdown database and boot database afresh without 
+            // derby.system.durability set to test. In this case the derby.log 
+            // and the log control file should still have the marker that this 
+            // mode was once used to boot database.
+            report(
+                "2. shutdown database and reboot database without " +
+                "derby.system.durability=test and test for marker in log.ctrl file");
+            markerNotOverwritten(derbyHome);
+
+        } catch (Throwable e) {
+            report("FAIL -- unexpected exception: " + e);
+            e.printStackTrace();
+        }
+
+    }
+
+    /**
+     * time inserts 
+     * 
+     * @param mode
+     *            value for derby.system.durability property
+     * @param create
+     *            should table be created or not
+     * @param autoCommit
+     *            whether inserts should happen in autocommit mode or not
+     * @return time taken to do inserts
+     * @throws Exception
+     */
+    public static long timeTakenToInsert(String mode, boolean create,
+            boolean autoCommit) throws Exception {
+        System.setProperty("derby.system.durability", mode);
+        Connection conn = org.apache.derby.tools.ij.startJBMS();
+
+        if (create) {
+            Statement s = conn.createStatement();
+            s.execute("create table t1 (c1 int, c2 int)");
+            s.close();
+        }
+
+        long timeTaken = doInserts(conn, autoCommit);
+
+        try {
+            conn.close();
+            DriverManager.getConnection("jdbc:derby:;shutdown=true");
+        } catch (SQLException sqle) {
+            if ("XJ015".equals(sqle.getSQLState())) {
+            }// ok database shutdown
+            else {
+                report(sqle.getSQLState());
+                report("ERROR! during shutdown");
+                sqle.printStackTrace();
+            }
+        }
+
+        return timeTaken;
+    }
+
+
+    /**
+     * Note doing inserts in autocommit mode is probably the worst case scenario
+     * in terms of performance as each commit will involve a flush/sync to disk
+     * but in case of the derby.system.durability=test mode, the syncs dont 
+     * happen.
+     */
+    public static long doInserts(Connection conn,boolean autoCommit) throws Exception {
+        PreparedStatement ps = conn
+                .prepareStatement("insert into t1 values(?,?)");
+        conn.setAutoCommit(autoCommit);
+        long count = 0;
+
+        long start = System.currentTimeMillis();
+
+        for (int i = 0; i < 500; i++) {
+            ps.setInt(1, i);
+            ps.setInt(2, i);
+            count += ps.executeUpdate();
+        }
+
+        if (!autoCommit)
+            conn.commit();
+        long end = System.currentTimeMillis();
+        if (count < 500)
+            report(" FAIL!! all rows didnt get inserted ?");
+        
+        return (end - start);
+    }
+
+    /**
+     * When derby.system.durability is set, a warning message is written out to
+     * derby.log indicating that the property is set and that it does not
+     * guarantee recoverability This test tests if a message is written out to
+     * derby.log or not
+     */
+    public static boolean isMessageInDerbyLog(String derbyHome) throws Exception {
+        BufferedReader reader = null;
+        File derbylog = null;
+        try {
+            derbylog = new File(derbyHome, "derby.log");
+            reader = new BufferedReader(new FileReader(derbylog));
+
+            String line = null;
+            while ((line = reader.readLine()) != null) {
+                if (line.indexOf("derby.system.durability=test") != -1)
+                    return true;
+
+            }
+            return false;
+        } finally {
+            if (reader != null) {
+                reader.close();
+            }
+            derbylog = null;
+        }
+    }
+
+    /**
+     * if database is booted with derby.system.durability=test, 
+     * a marker is written out into log control
+     * file to recognize that the database was previously booted in this mode
+     * Test if the marker byte is set correctly or not. See comments in
+     * org.apache.derby.impl.store.log.LogToFile for IS_DURABILITY_TESTMODE_NO_SYNC_FLAG
+     */
+    public static void markerInControlFile(String derbyHome) throws Exception {
+        RandomAccessFile controlFile = null;
+        try {
+            int testModeNoSyncMarkerPosition = 28;
+            byte testModeNoSyncMarker = 0x2;
+            controlFile = new RandomAccessFile(derbyHome
+                    + "/wombat/log/log.ctrl", "r");
+            controlFile.seek(testModeNoSyncMarkerPosition);
+            report("log.ctrl file has durability testMode no sync marker value = "
+                    + ((controlFile.readByte() & testModeNoSyncMarker) != 0) );
+        } finally {
+            if (controlFile != null)
+                controlFile.close();
+
+        }
+    }
+
+    /**
+     * Test for case when database is booted without derby.system.durability=test
+     * but previously has been booted with the derby.system.durability=test. In 
     * this scenario, the log control file should still have the marker to say
+     * that this mode was set previously, and derby.log must also have a warning
+     * message
+     * @param derbyHome value of derby.system.home where the database is
+     * @throws Exception
+     */
+    public static void markerNotOverwritten(String derbyHome) throws Exception
+    {
+        // shutdown database
+        Connection conn = null;
+        // unset property
+        System.setProperty("derby.system.durability","");
+        conn = org.apache.derby.tools.ij.startJBMS();
+        conn.close();
+        markerInControlFile(derbyHome);
+        report("Is warning message about derby.system.durability=test present in derby.log ="
+                + isMessageInDerbyLog(derbyHome));
+    }
+    
+    /**
+     * print message
+     * @param msg to print out 
+     */
+    public static void report(String msg) {
+        System.out.println(msg);
+    }
+
+    /**
+     * Test if derby.system.durability=test property is broken or not. We time
+     * inserts for 500 repeated inserts and make some approximate estimations on
+     * how fast it should be. Since it is a timing based test, there might be
+     * cases of a really really slow machine in some weird cases where this test
+     * may have diffs in this part of the test.
+     * 
+     * So basically to determine if something is wrong, the following is done
+     * to try best to eliminate issues with slow machines
+     * 1)if inserts with autocommit on is fast enough (an estimated
+     * bound)
+     * 2)if not, then since no syncs happen , check if time taken to do
+     * inserts for autocommit on and off are in proximity range 
+     * 3)if 1 and 2 is not satisfied, then check time taken without this mode set
+     * and with this mode set.
+     * If they are in proximity range something could be wrong. It
+     * might be good to check the machine configuration and environment when
+     * test was running <BR>
+     * Also note, although it would seem like a solution would be to bump the
+     * estimated bound to a high limit, this might not help since on a really
+     * good disk the inserts doing syncs might well be done within the bound and
+     * thus would not be possible to know if this mode was possibly broken.
+     *  
+     */
+    public static void testNoSyncs(String[] args) throws Exception {
+        boolean debug = false;  // if set, prints out useful info when debugging test
+        
+        report("1. With derby.system.durability=test,"
+                + "Test to see if syncs are not happening ");
+        // use the ij utility to read the property file and
+        // make the initial connection.
+        org.apache.derby.tools.ij.getPropertyArg(args);
+
+        boolean create = true;
+
+        // Note we time inserts in normal all syncs case first even
+        // though we may not require it if the inserts finish fast enough
+        // But timing them here because once database is booted with
+        // derby.system.durability=test there are no guarantees on consistency
        // of database so don't want to mess up numbers for the all syncs case
+
+        // derby.system.durability is not test so it will default to
+        // normal mode and autocommit=true
+        long timeCommitOn = timeTakenToInsert("", create, true);
+        String derbyHome = System.getProperty("derby.system.home");
+        if (isMessageInDerbyLog(derbyHome))
+            report("ERROR! System should not have been booted with"
+                    + "derby.system.durability=test mode here");
+        create = false;
+        // derby.system.durability=test and autocommit=true
+        long timeWithTestModeCommitOn = timeTakenToInsert("test", create, true);
+        // derby.system.durability=test and autocommit=false
+        long timeWithTestModeCommitOff = timeTakenToInsert("test", create, false);
+      
+        if (debug) {
+            report("timeCommitOn = " + timeCommitOn);
+            report("timeWithTestModeCommitOn = " + timeWithTestModeCommitOn);
+            report("timeWithTestModeCommitOff = " + timeWithTestModeCommitOff);
+        }
+
+        // an approximation on the upper bound for time taken to do
+        // inserts in autocommit mode with derby.system.durability=test mode
+        long upperBound = 3000;
+
+        // if it takes a lot of time to do the inserts then do extra checks
+        // to determine if derby.system.durability=test mode is broken or not
        // because we can't be sure if inserts just took a long time
+        // because of a really slow machine
+        if (timeWithTestModeCommitOn > upperBound) {
+
+            long proximityRange = 1000;
+
+            // in derby.system.durability=test autocommit on or off should
+            // be in same range since syncs are not happening
+            if (Math.abs(timeWithTestModeCommitOn - timeWithTestModeCommitOff) > proximityRange) {
+                // another approximation here (1.5 times of with testmode set)
+                if (timeWithTestModeCommitOn > timeCommitOn
+                        || (timeCommitOn < (1.5 * timeWithTestModeCommitOn))) {
+                    report("FAIL -- derby.system.durability=test mode seems to be broken.");
+                    report("-- In this mode one would expect that inserts with autocommit off and on "
+                            + "would be in the same range as syncs are not happening but the difference "
+                            + "here seems to be more than the approximate estimated range.");
+                    report("-- Also comparing the time taken to do the inserts without this" +
+                            " property set seems to be in the same"
+                            + " range as with this property set.");
+                    report("-- Please note this test times inserts and approximate estimates were " +
+                            "considered to report this observation.");
+                }
+            }
+        }
+
+    }
+}

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/TestDurabilityProperty.java
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/char32675.data
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/char32675trailingblanks.data
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/char32703.data
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/char32703trailingblanks.data
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/cisco.subsql
URL: http://svn.apache.org/viewcvs/db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/cisco.subsql?rev=329187&r1=329186&r2=329187&view=diff
==============================================================================
--- db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/cisco.subsql (original)
+++ db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/cisco.subsql Fri Oct 28 04:51:50 2005
@@ -1,72 +1,72 @@
---------------------------------------------------------------------------------
--- Test 0.1: ddl interaction
---------------------------------------------------------------------------------
-
--- create 1st table.  Drop the table, which will get regular X locks on the
--- rows in the system catalogs (which are different than insert locks).
-set connection create1;
-create table create1 (a int);
-drop table create1;
-
--- create 2nd table
---    The following should not block now that ddl work is done under 
---    repeatable read no matter what the user isolation level.
-set connection create2;
-create table create2 (a int);
-
-set connection create1;
-commit;
-set connection create2;
-commit;
-
---------------------------------------------------------------------------------
--- Test 0.2: normal user data, previous key must block on serializable read,
---           if create1 session isolation is serializable, otherwise it will
---           not block.
---------------------------------------------------------------------------------
-
--- get lock on range of keys 0->10, iso level determines if phantoms allowed.
-set connection create1;
-select * from data where keycol <= 10;
-
--- the following must block if create1 isolation level disallows phantoms
-set connection create2;
-insert into data values (5, '50');
-commit;
-
--- now do the select again from session one to see if a phantom showed up.
-set connection create1;
-select * from data where keycol <= 10;
-
-set connection create1;
-commit;
-delete from data where keycol = 5;
-commit;
-set connection create2;
-commit;
-
---------------------------------------------------------------------------------
--- Test 0.3: normal user data, previous key must block on serializable delete,
---           if create1 session isolation is serializable, otherwise it will
---           not block.
---------------------------------------------------------------------------------
-
--- get lock on range of keys 0->10, iso level determines if phantoms allowed.
-set connection create1;
-delete from data where keycol <= 10;
-
--- the following must block if create1 isolation level disallows phantoms
-set connection create2;
-insert into data values (6, '60');
-commit;
-
--- now do the select again from session one to see if a phantom showed up.
-set connection create1;
-select * from data where keycol <= 10;
-
-set connection create1;
-commit;
-delete from data where keycol = 6;
-commit;
-set connection create2;
-commit;
+--------------------------------------------------------------------------------
+-- Test 0.1: ddl interaction
+--------------------------------------------------------------------------------
+
+-- create 1st table.  Drop the table, which will get regular X locks on the
+-- rows in the system catalogs (which are different than insert locks).
+set connection create1;
+create table create1 (a int);
+drop table create1;
+
+-- create 2nd table
+--    The following should not block now that ddl work is done under 
+--    repeatable read no matter what the user isolation level.
+set connection create2;
+create table create2 (a int);
+
+set connection create1;
+commit;
+set connection create2;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test 0.2: normal user data, previous key must block on serializable read,
+--           if create1 session isolation is serializable, otherwise it will
+--           not block.
+--------------------------------------------------------------------------------
+
+-- get lock on range of keys 0->10, iso level determines if phantoms allowed.
+set connection create1;
+select * from data where keycol <= 10;
+
+-- the following must block if create1 isolation level disallows phantoms
+set connection create2;
+insert into data values (5, '50');
+commit;
+
+-- now do the select again from session one to see if a phantom showed up.
+set connection create1;
+select * from data where keycol <= 10;
+
+set connection create1;
+commit;
+delete from data where keycol = 5;
+commit;
+set connection create2;
+commit;
+
+--------------------------------------------------------------------------------
+-- Test 0.3: normal user data, previous key must block on serializable delete,
+--           if create1 session isolation is serializable, otherwise it will
+--           not block.
+--------------------------------------------------------------------------------
+
+-- get lock on range of keys 0->10, iso level determines if phantoms allowed.
+set connection create1;
+delete from data where keycol <= 10;
+
+-- the following must block if create1 isolation level disallows phantoms
+set connection create2;
+insert into data values (6, '60');
+commit;
+
+-- now do the select again from session one to see if a phantom showed up.
+set connection create1;
+select * from data where keycol <= 10;
+
+set connection create1;
+commit;
+delete from data where keycol = 6;
+commit;
+set connection create2;
+commit;

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/cisco.subsql
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/createTestProcedures.subsql
URL: http://svn.apache.org/viewcvs/db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/createTestProcedures.subsql?rev=329187&r1=329186&r2=329187&view=diff
==============================================================================
--- db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/createTestProcedures.subsql (original)
+++ db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/createTestProcedures.subsql Fri Oct 28 04:51:50 2005
@@ -1,5 +1,5 @@
-
-CREATE FUNCTION  PADSTRING (DATA VARCHAR(32000), LENGTH INTEGER) RETURNS VARCHAR(32000) EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.Formatters.padString' LANGUAGE JAVA PARAMETER STYLE JAVA;
-
-CREATE PROCEDURE WAIT_FOR_POST_COMMIT() DYNAMIC RESULT SETS 0 LANGUAGE JAVA EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.T_Access.waitForPostCommitToFinish' PARAMETER STYLE JAVA;
-
+
+CREATE FUNCTION  PADSTRING (DATA VARCHAR(32000), LENGTH INTEGER) RETURNS VARCHAR(32000) EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.Formatters.padString' LANGUAGE JAVA PARAMETER STYLE JAVA;
+
+CREATE PROCEDURE WAIT_FOR_POST_COMMIT() DYNAMIC RESULT SETS 0 LANGUAGE JAVA EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.T_Access.waitForPostCommitToFinish' PARAMETER STYLE JAVA;
+

Propchange: db/derby/code/trunk/java/testing/org/apache/derbyTesting/functionTests/tests/store/createTestProcedures.subsql
------------------------------------------------------------------------------
    svn:eol-style = native



Mime
View raw message