From: upthewaterspout@apache.org
To: commits@geode.incubator.apache.org
Date: Mon, 22 Feb 2016 18:35:38 -0000
Message-Id: <6f4c222b1d694b27a8b2b6841f020b83@git.apache.org>
In-Reply-To: <0806417cf36345658593943fc13e8a5d@git.apache.org>
References: <0806417cf36345658593943fc13e8a5d@git.apache.org>
Subject: [16/83] [abbrv] [partial] incubator-geode git commit: Merge remote-tracking branch 'origin/develop' into feature/GEODE-917

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5beaaedc/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
----------------------------------------------------------------------
diff --cc geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
index 42459c9,0000000..934798c
mode 100644,000000..100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
@@@ -1,1987 -1,0 +1,1987 @@@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.query.dunit;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.client.ClientCache;
+import com.gemstone.gemfire.cache.client.ClientCacheFactory;
+import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
+import com.gemstone.gemfire.cache.client.PoolFactory;
+import com.gemstone.gemfire.cache.client.PoolManager;
+import com.gemstone.gemfire.cache.query.Index;
+import com.gemstone.gemfire.cache.query.Query;
+import com.gemstone.gemfire.cache.query.QueryService;
+import com.gemstone.gemfire.cache.query.SelectResults;
+import com.gemstone.gemfire.cache.query.Struct;
+import com.gemstone.gemfire.cache.query.data.Portfolio;
+import com.gemstone.gemfire.cache.query.data.PortfolioPdx;
+import com.gemstone.gemfire.cache.query.data.PositionPdx;
+import com.gemstone.gemfire.cache.query.internal.DefaultQuery;
+import com.gemstone.gemfire.cache.query.internal.index.CompactRangeIndex;
+import com.gemstone.gemfire.cache.query.internal.index.IndexManager;
+import com.gemstone.gemfire.cache.query.internal.index.IndexStore.IndexStoreEntry;
+import com.gemstone.gemfire.cache.query.internal.index.PartitionedIndex;
+import com.gemstone.gemfire.cache.query.internal.index.RangeIndex;
+import com.gemstone.gemfire.cache.query.types.CollectionType;
+import com.gemstone.gemfire.cache.query.types.ObjectType;
+import com.gemstone.gemfire.cache.server.CacheServer;
+import com.gemstone.gemfire.cache30.ClientServerTestCase;
+import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.persistence.query.CloseableIterator;
+import com.gemstone.gemfire.pdx.internal.PdxString;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.VM;
+
+public class PdxStringQueryDUnitTest extends CacheTestCase{
+  private static int bridgeServerPort;
+
+  public PdxStringQueryDUnitTest(String name) {
+    super(name);
+  }
+
+  private final String rootRegionName = "root";
+  private final String regionName = "PdxTest";
+  private final String regName = "/" + rootRegionName + "/" + regionName;
+  private final static int orderByQueryIndex = 11;
+  private final static int [] groupByQueryIndex = new int[]{7, 8, 9,10};
+
+  private final String[] queryString = new String[] {
+      "SELECT pos.secId FROM " + regName + " p, p.positions.values pos WHERE pos.secId LIKE '%L'",//0
+      "SELECT pos.secId FROM " + regName + " p, p.positions.values pos where pos.secId = 'IBM'",//1
+      "SELECT pos.secId, p.status FROM " + regName + " p, p.positions.values pos where pos.secId > 'APPL'",//2
+      "SELECT pos.secId FROM " + regName + " p, p.positions.values pos WHERE pos.secId > 'APPL' and pos.secId < 'SUN'",//3
+      "select pos.secId from " + regName + " p, p.positions.values pos where pos.secId IN SET ('YHOO', 'VMW')",//4
+      "select pos.secId from " + regName + " p, p.positions.values pos where NOT (pos.secId = 'VMW')",//5
+      "select pos.secId from " + regName + " p, p.positions.values pos where NOT (pos.secId IN SET('SUN', 'ORCL')) ",//6
+      "select pos.secId , count(pos.id) from " + regName + " p, p.positions.values pos where pos.secId > 'APPL' group by pos.secId ",//7
+      "select pos.secId , sum(pos.id) from " + regName + " p, p.positions.values pos where pos.secId > 'APPL' group by pos.secId ",//8,
+      "select pos.secId , count(distinct pos.secId) from " + regName + " p, p.positions.values pos where pos.secId > 'APPL' group by pos.secId ",//9
+      "select count(distinct pos.secId) from " + regName + " p, p.positions.values pos where pos.secId > 'APPL' ",//10
+      "SELECT distinct pos.secId FROM " + regName + " p, p.positions.values pos order by pos.secId",//11
+      "SELECT distinct pos.secId FROM " + regName + " p, p.positions.values pos WHERE p.ID > 1 order by pos.secId limit 5",//12
+  };
+
+  private final String[] queryString2 = new String[] {
+      "SELECT pos.secIdIndexed FROM " + regName + " p, p.positions.values pos WHERE pos.secIdIndexed LIKE '%L'",//0
+      "SELECT pos.secIdIndexed FROM " + regName + " p, p.positions.values pos where pos.secIdIndexed = 'IBM'",//1
+      "SELECT pos.secIdIndexed, p.status FROM " + regName + " p, p.positions.values pos where pos.secIdIndexed > 'APPL'",//2
+      "SELECT pos.secIdIndexed FROM " + regName + " p, p.positions.values pos WHERE pos.secIdIndexed > 'APPL' and pos.secIdIndexed < 'SUN'",//3
+      "select pos.secIdIndexed from " + regName + " p, p.positions.values pos where pos.secIdIndexed IN SET ('YHOO', 'VMW')",//4
+      "select pos.secIdIndexed from " + regName + " p, p.positions.values pos where NOT (pos.secIdIndexed = 'VMW')",//5
+      "select pos.secIdIndexed from " + regName + " p, p.positions.values pos where NOT (pos.secIdIndexed IN SET('SUN', 'ORCL')) ",//6
+      "select pos.secIdIndexed , count(pos.id) from " + regName + " p, p.positions.values pos where pos.secIdIndexed > 'APPL' group by pos.secIdIndexed ",//7
+      "select pos.secIdIndexed , sum(pos.id) from " + regName + " p, p.positions.values pos where pos.secIdIndexed > 'APPL' group by pos.secIdIndexed ",//8
+      "select pos.secIdIndexed , count(distinct pos.secIdIndexed) from " + regName + " p, p.positions.values pos where pos.secIdIndexed > 'APPL' group by pos.secIdIndexed ",//9
+      "select count(distinct pos.secIdIndexed) from " + regName + " p, p.positions.values pos where pos.secIdIndexed > 'APPL' ",//10
+      "SELECT distinct pos.secIdIndexed FROM " + regName + " p, p.positions.values pos order by pos.secIdIndexed",//11
+      "SELECT distinct pos.secIdIndexed FROM " + regName + " p, p.positions.values pos WHERE p.ID > 1 order by pos.secIdIndexed limit 5",//12
+  };
+
+  public void testReplicatedRegionNoIndex() throws CacheException {
+    final Host host = Host.getHost(0);
+    VM server0 = host.getVM(0);
+    VM server1 = host.getVM(1);
+    VM server2 = host.getVM(2);
+    VM client = host.getVM(3);
+    final int numberOfEntries = 10;
+
+    // Start server1 and create index
+    server0.invoke(new CacheSerializableRunnable("Create Server1") {
+      public void run2() throws CacheException {
+        configAndStartBridgeServer(false,false,false);
+        // create a local query service
+        QueryService localQueryService = null;
+        try {
+          localQueryService = getCache().getQueryService();
+        } catch (Exception e) {
+          Assert.fail("Failed to get QueryService.", e);
+        }
+        Index index = null;
+        // create an index on statusIndexed is created
+        try {
+          index = localQueryService.createIndex("secIdIndex2", "pos.secIdIndexed", regName + " p, p.positions.values pos");
+          if(!(index instanceof RangeIndex)){
+            fail("Range Index should have been created instead of " + index.getClass());
+          }
+        } catch (Exception ex) {
+          fail("Failed to create index." + ex.getMessage());
+        }
+      }
+    });
+
+    // Start server2
+    server1.invoke(new CacheSerializableRunnable("Create Server2") {
+      public void run2() throws CacheException {
+        configAndStartBridgeServer(false,false, false);
+        Region region = getRootRegion().getSubregion(regionName);
+      }
+    });
+
+    // Start server3
+    server2.invoke(new CacheSerializableRunnable("Create Server3") {
+      public void run2() throws CacheException {
+        configAndStartBridgeServer(false,false, false);
+        Region region = getRootRegion().getSubregion(regionName);
+      }
+    });
+
+    // Client pool.
-    final int port0 = server0.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
-    final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
-    final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
++    final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
++    final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
++    final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
+
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
+
+    // Create client pool.
+ final String poolName = "testClientServerQueryPool"; + createPool(client, poolName, new String[]{host0}, new int[]{port0, port1, port2}, true); + + // Create client region and put PortfolioPdx objects (PdxInstances) + client.invoke(new CacheSerializableRunnable("Create client") { + public void run2() throws CacheException { + AttributesFactory factory = new AttributesFactory(); + factory.setScope(Scope.LOCAL); + ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null); + Region region = createRegion(regionName, rootRegionName, factory.create()); + LogWriterUtils.getLogWriter().info("Put PortfolioPdx"); + for (int i=0; i PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); + + final String host0 = NetworkUtils.getServerHostName(server0.getHost()); + + // Create client pool. + final String poolName = "testClientServerQueryPool"; + createPool(client, poolName, new String[]{host0}, new int[]{port0, port1, port2}, true); + + // Create client region and put PortfolioPdx objects (PdxInstances) + client.invoke(new CacheSerializableRunnable("Create client") { + public void run2() throws CacheException { + AttributesFactory factory = new AttributesFactory(); + factory.setScope(Scope.LOCAL); + ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null); + Region region = createRegion(regionName, rootRegionName, factory.create()); + LogWriterUtils.getLogWriter().info("Put PortfolioPdx"); + for (int i=0; i iter = ((CompactRangeIndex) index) + .getIndexStorage().iterator(null); + while (iter.hasNext()) { + Object key = iter.next().getDeserializedKey(); + if (!(key instanceof PdxString)) { + fail("All keys of the CompactRangeIndex should be PdxStrings and not " + + key.getClass()); + } + } + } + }); + + // Execute queries from client to server and locally on client + SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") { + public void run2() throws CacheException { + QueryService remoteQueryService = null; + QueryService localQueryService = null; + SelectResults[][] rs = new SelectResults[1][2]; + try { + remoteQueryService = (PoolManager.find(poolName)).getQueryService(); + localQueryService = getCache().getQueryService(); + } catch (Exception e) { + Assert.fail("Failed to get QueryService.", e); + } + + for (int i=0; i < queryString.length; i++){ + try { + LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]); + Query query = remoteQueryService.newQuery(queryString[i]); + rs[0][0] = (SelectResults)query.execute(); + LogWriterUtils.getLogWriter().info("RR remote indexType: CompactRange size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);; + checkForPdxString(rs[0][0].asList(), queryString[i]); + + LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]); + query = localQueryService.newQuery(queryString[i]); + rs[0][1] = (SelectResults)query.execute(); + LogWriterUtils.getLogWriter().info("RR client local indexType: CompactRange size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);; + checkForPdxString(rs[0][1].asList(), queryString[i]); + + if(i < orderByQueryIndex){ + // Compare local and remote query results. 
+ if (!compareResultsOfWithAndWithoutIndex(rs)){ + fail("Local and Remote Query Results are not matching for query :" + queryString[i]); + } + } + else{ + //compare the order of results returned + compareResultsOrder(rs, false); + } + } catch (Exception e) { + Assert.fail("Failed executing " + queryString[i], e); + } + } + + } + }; + + client.invoke(executeQueries); + + // Put Non Pdx objects on server execute queries locally + server0.invoke(new CacheSerializableRunnable("Create Bridge Server") { + public void run2() throws CacheException { + Region region = getRootRegion().getSubregion(regionName); + + LogWriterUtils.getLogWriter().info("Put Objects locally on server"); + for (int i=numberOfEntries; i PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); + final String host0 = NetworkUtils.getServerHostName(server0.getHost()); + + // Create client pool. + final String poolName = "testClientServerQueryPool"; + createPool(client, poolName, new String[]{host0}, new int[]{port0, port1, port2}, true); + + // Create client region and put PortfolioPdx objects (PdxInstances) + client.invoke(new CacheSerializableRunnable("Create client") { + public void run2() throws CacheException { + AttributesFactory factory = new AttributesFactory(); + factory.setScope(Scope.LOCAL); + ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null); + Region region = createRegion(regionName, rootRegionName, factory.create()); + LogWriterUtils.getLogWriter().info("Put PortfolioPdx"); + for (int i=0; i PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); + + final String host0 = NetworkUtils.getServerHostName(server0.getHost()); + + // Create client pool. + final String poolName = "testClientServerQueryPool"; + createPool(client, poolName, new String[]{host0}, new int[]{port0, port1, port2}, true); + + // Create client region and put PortfolioPdx objects (PdxInstances) + client.invoke(new CacheSerializableRunnable("Create client") { + public void run2() throws CacheException { + AttributesFactory factory = new AttributesFactory(); + factory.setScope(Scope.LOCAL); + ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null); + Region region = createRegion(regionName, rootRegionName, factory.create()); + LogWriterUtils.getLogWriter().info("Put PortfolioPdx"); + for (int i=0; i PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); + + final String host0 = NetworkUtils.getServerHostName(server0.getHost()); + + // Create client pool. 
+ final String poolName = "testClientServerQueryPool"; + createPool(client, poolName, new String[]{host0}, new int[]{port0, port1, port2}, true); + + // Create client region and put PortfolioPdx objects (PdxInstances) + client.invoke(new CacheSerializableRunnable("Create client") { + public void run2() throws CacheException { + AttributesFactory factory = new AttributesFactory(); + factory.setScope(Scope.LOCAL); + ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null); + Region region = createRegion(regionName, rootRegionName, factory.create()); + LogWriterUtils.getLogWriter().info("Put PortfolioPdx"); + for (int i=0; i iter = ((CompactRangeIndex) o) + .getIndexStorage().iterator(null); + while (iter.hasNext()) { + Object key = iter.next().getDeserializedKey(); + if (!(key instanceof PdxString)) { + fail("All keys of the CompactRangeIndex in the Partitioned index should be PdxStrings and not " + + key.getClass()); + } + } + } + } + else{ + fail("Partitioned index expected"); + } + } + }); + + // Execute queries from client to server and locally on client + SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") { + public void run2() throws CacheException { + QueryService remoteQueryService = null; + QueryService localQueryService = null; + SelectResults[][] rs = new SelectResults[1][2]; + + try { + remoteQueryService = (PoolManager.find(poolName)).getQueryService(); + localQueryService = getCache().getQueryService(); + } catch (Exception e) { + Assert.fail("Failed to get QueryService.", e); + } + + for (int i=0; i < queryString.length; i++){ + try { + LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]); + Query query = remoteQueryService.newQuery(queryString[i]); + rs[0][0] = (SelectResults)query.execute(); + LogWriterUtils.getLogWriter().info("RR remote indexType:CompactRange size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);; + checkForPdxString(rs[0][0].asList(), queryString[i]); + + LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]); + query = localQueryService.newQuery(queryString[i]); + rs[0][1] = (SelectResults)query.execute(); + LogWriterUtils.getLogWriter().info("isPR: " + isPr+ " client local indexType:CompactRange size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);; + checkForPdxString(rs[0][1].asList(), queryString[i]); + + if(i < orderByQueryIndex){ + // Compare local and remote query results. 
+ if (!compareResultsOfWithAndWithoutIndex(rs)){ + fail("Local and Remote Query Results are not matching for query :" + queryString[i]); + } + } + else{ + //compare the order of results returned + compareResultsOrder(rs, isPr); + } + } catch (Exception e) { + Assert.fail("Failed executing " + queryString[i], e); + } + } + } + }; + + client.invoke(executeQueries); + // Put Non Pdx objects on server execute queries locally + server0.invoke(new CacheSerializableRunnable("Create Bridge Server") { + public void run2() throws CacheException { + Region region = getRootRegion().getSubregion(regionName); + + LogWriterUtils.getLogWriter().info("Put Objects locally on server"); + for (int i=numberOfEntries; i PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); + + final String host0 = NetworkUtils.getServerHostName(server0.getHost()); + + // Create client pool. + final String poolName = "testClientServerQueryPool"; + createPool(client, poolName, new String[]{host0}, new int[]{port0, port1, port2}, true); + + // Create client region and put PortfolioPdx objects (PdxInstances) + client.invoke(new CacheSerializableRunnable("Create client") { + public void run2() throws CacheException { + AttributesFactory factory = new AttributesFactory(); + factory.setScope(Scope.LOCAL); + ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null); + Region region = createRegion(regionName, rootRegionName, factory.create()); + LogWriterUtils.getLogWriter().info("Put PortfolioPdx"); + for (int i=0; i PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); ++ final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort()); + + final String host0 = NetworkUtils.getServerHostName(server0.getHost()); + + // Create client pool. 
+ final String poolName = "testClientServerQueryPool"; + createPool(client, poolName, new String[]{host0}, new int[]{port0, port1, port2}, true); + + // Create client region and put PortfolioPdx objects (PdxInstances) + client.invoke(new CacheSerializableRunnable("Create client") { + public void run2() throws CacheException { + AttributesFactory factory = new AttributesFactory(); + factory.setScope(Scope.LOCAL); + ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null); + Region region = createRegion(regionName, rootRegionName, factory.create()); + + LogWriterUtils.getLogWriter().info("Put PortfolioPdx"); + // Put some PortfolioPdx objects with null Status and secIds + for (int i=0; i iter = ((CompactRangeIndex) o).getIndexStorage().iterator(null); + while (iter.hasNext()) { + Object key = iter.next().getDeserializedKey(); + if (!(key instanceof PdxString) && !(key == IndexManager.NULL)) { + fail("All keys of the CompactRangeIndex in the Partitioned index should be PdxStrings and not " + + key.getClass()); + } + } + } + } + else{ + fail("Partitioned index expected"); + } + } + }); + + // Execute queries from client to server and locally on client + client.invoke( new CacheSerializableRunnable("Execute queries") { + public void run2() throws CacheException { + QueryService remoteQueryService = null; + QueryService localQueryService = null; + SelectResults[][] rs = new SelectResults[1][2]; + + try { + remoteQueryService = (PoolManager.find(poolName)).getQueryService(); + localQueryService = getCache().getQueryService(); + } catch (Exception e) { + Assert.fail("Failed to get QueryService.", e); + } + + // Querying the fields with null values + String[] qs = {"SELECT pos.secId FROM " + regName + " p, p.positions.values pos where p.status = null", + "SELECT p.pkid FROM " + regName + " p, p.positions.values pos where pos.secId = null"}; + + for(int i = 0; i <2; i++){ + try { + Query query = remoteQueryService.newQuery(qs[i]); + SelectResults res = (SelectResults)query.execute(); + LogWriterUtils.getLogWriter().info("PR NULL Pdxstring test size of resultset: "+ res.size() + " for query: " + qs[i]);; + if(i == 0){ + for(Object o : res){ + if(o != null){ + fail("Query : " + qs[i] + " should have returned null and not " + o); + } + } + }else{ + checkForPdxString(res.asList(), qs[i]); + } + } catch (Exception e) { + Assert.fail("Failed executing " + qs[i], e); + } + } + } + }); + + this.closeClient(server2); + this.closeClient(client); + this.closeClient(server1); + this.closeClient(server0); + } + + + private void compareResultsOrder(SelectResults[][] r, boolean isPr){ + for (int j = 0; j < r.length; j++) { + Object []r1 = (r[j][0]).toArray(); + Object []r2 = (r[j][1]).toArray(); + if(r1.length != r2.length){ + fail("Size of results not equal: " + r1.length + " vs " + r2.length); + } + for (int i = 0, k=0; i < r1.length && k < r2.length; i++,k++) { + System.out.println("r1: " + r1[i] + " r2: " + r2[k]); + if(!r1[i].equals(r2[k])){ + fail("Order not equal: " + r1[i] + " : " +r2[k] + " isPR: " + isPr ); + } + } + } + } + + private void checkForPdxString(List results, String query) { + boolean isGroupByQuery = false; + for (int i : groupByQueryIndex) { + if (query.equals(queryString[i]) || query.equals(queryString2[i])) { + isGroupByQuery = true; + break; + } + } + for (Object o : results) { + if (o instanceof Struct) { + if (!isGroupByQuery) { + Object o1 = ((Struct) o).getFieldValues()[0]; + Object o2 = ((Struct) o).getFieldValues()[1]; + if (!(o1 instanceof 
String)) { + fail("Returned instance of " + o1.getClass() + + " instead of String for query: " + query); + } + + if (!(o2 instanceof String)) { + fail("Returned instance of " + o2.getClass() + + " instead of String for query: " + query); + } + } + } else { + if (!isGroupByQuery) { + if (!(o instanceof String)) { + fail("Returned instance of " + o.getClass() + + " instead of String for query: " + query); + } + } + } + } + } + + public boolean compareResultsOfWithAndWithoutIndex(SelectResults[][] r ) { + boolean ok = true; + Set set1 = null; + Set set2 = null; + Iterator itert1 = null; + Iterator itert2 = null; + ObjectType type1, type2; + outer: for (int j = 0; j < r.length; j++) { + CollectionType collType1 = r[j][0].getCollectionType(); + CollectionType collType2 = r[j][1].getCollectionType(); + type1 = collType1.getElementType(); + type2 = collType2.getElementType(); + + if (r[j][0].size() == r[j][1].size()) { + System.out.println("Both SelectResults are of Same