ignite-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From akuznet...@apache.org
Subject incubator-ignite git commit: # IGNITE-480 Rework scala examples to dynamic caches.
Date Mon, 16 Mar 2015 02:57:32 GMT
Repository: incubator-ignite
Updated Branches:
  refs/heads/ignite-45 89ab9a0a7 -> c7137c3cb


# IGNITE-480 Rework scala examples to dynamic caches.


Project: http://git-wip-us.apache.org/repos/asf/incubator-ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ignite/commit/c7137c3c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ignite/tree/c7137c3c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ignite/diff/c7137c3c

Branch: refs/heads/ignite-45
Commit: c7137c3cb3f492486cc7ce20a294a6686379d94e
Parents: 89ab9a0
Author: AKuznetsov <akuznetsov@gridgain.com>
Authored: Mon Mar 16 09:57:32 2015 +0700
Committer: AKuznetsov <akuznetsov@gridgain.com>
Committed: Mon Mar 16 09:57:32 2015 +0700

----------------------------------------------------------------------
 .../examples/ScalarCacheAffinityExample1.scala  | 70 +++++++++--------
 .../examples/ScalarCacheAffinityExample2.scala  | 82 ++++++++++----------
 .../ScalarCacheAffinitySimpleExample.scala      | 25 +++---
 .../scalar/examples/ScalarCacheExample.scala    | 19 +++--
 .../ScalarCachePopularNumbersExample.scala      | 70 +++++++++--------
 .../examples/ScalarCacheQueryExample.scala      | 21 +++--
 .../examples/ScalarSnowflakeSchemaExample.scala | 70 ++++++++++-------
 .../scala/org/apache/ignite/scalar/scalar.scala | 24 ++++--
 8 files changed, 221 insertions(+), 160 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample1.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample1.scala
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample1.scala
index 7f6e3ee..c99a284 100644
--- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample1.scala
+++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample1.scala
@@ -37,10 +37,10 @@ import org.jetbrains.annotations.Nullable
  */
 object ScalarCacheAffinityExample1 {
     /** Configuration file name. */
-    private val CONFIG = "examples/config/example-cache.xml" // Cache.
+    private val CONFIG = "examples/config/example-compute.xml"
 
-    /** Name of cache specified in spring configuration. */
-    private val NAME = "partitioned"
+    /** Name of cache. */
+    private val NAME = ScalarCacheAffinityExample1.getClass.getSimpleName
 
     /**
      * Example entry point. No arguments required.
@@ -50,47 +50,49 @@ object ScalarCacheAffinityExample1 {
      */
     def main(args: Array[String]) {
         scalar(CONFIG) {
-            // Clean up caches on all nodes before run.
-            cache$(NAME).get.clear()
+            val cache = createCache$[String, String](NAME)
 
-            var keys = Seq.empty[String]
+            try {
+                val keys = ('A' to 'Z').map(_.toString).toSeq
 
-            ('A' to 'Z').foreach(keys :+= _.toString)
+                populateCache(ignite$, keys)
 
-            populateCache(ignite$, keys)
+                var results = Map.empty[String, String]
 
-            var results = Map.empty[String, String]
+                keys.foreach(key => {
+                    val res = ignite$.call$(
+                        new IgniteCallable[String] {
+                            @CacheAffinityKeyMapped
+                            def affinityKey(): String = key
 
-            keys.foreach(key => {
-                val res = ignite$.call$(
-                    new IgniteCallable[String] {
-                        @CacheAffinityKeyMapped
-                        def affinityKey(): String = key
+                            def cacheName(): String = NAME
 
-                        def cacheName(): String = NAME
+                            @Nullable def call: String = {
+                                println(">>> Executing affinity job for key: " +
key)
 
-                        @Nullable def call: String = {
-                            println(">>> Executing affinity job for key: " + key)
+                                val cache = cache$[String, String](NAME)
 
-                            val cache = cache$[String, String](NAME)
+                                if (!cache.isDefined) {
+                                    println(">>> Cache not found [nodeId=" + ignite$.cluster().localNode.id
+
+                                        ", cacheName=" + NAME + ']')
 
-                            if (!cache.isDefined) {
-                                println(">>> Cache not found [nodeId=" + ignite$.cluster().localNode.id
+
-                                    ", cacheName=" + NAME + ']')
-
-                                "Error"
+                                    "Error"
+                                }
+                                else
+                                    cache.get.localPeek(key)
                             }
-                            else
-                                cache.get.localPeek(key)
-                        }
-                    },
-                    null
-                )
-
-                results += (key -> res.head)
-            })
-
-            results.foreach(e => println(">>> Affinity job result for key '"
+ e._1 + "': " + e._2))
+                        },
+                        null
+                    )
+
+                    results += (key -> res.head)
+                })
+
+                results.foreach(e => println(">>> Affinity job result for key
'" + e._1 + "': " + e._2))
+            }
+            finally {
+                cache.close()
+            }
         }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample2.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample2.scala
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample2.scala
index d1d9cdd..a683884 100644
--- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample2.scala
+++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample2.scala
@@ -33,10 +33,10 @@ import scala.util.control.Breaks._
  */
 object ScalarCacheAffinityExample2 {
     /** Configuration file name. */
-    private val CONFIG = "examples/config/example-cache.xml" // Cache.
+    private val CONFIG = "examples/config/example-compute.xml"
 
-    /** Name of cache specified in spring configuration. */
-    private val NAME = "partitioned"
+    /** Name of cache. */
+    private val NAME = ScalarCacheAffinityExample2.getClass.getSimpleName
 
     /**
      * Example entry point. No arguments required.
@@ -46,42 +46,46 @@ object ScalarCacheAffinityExample2 {
      */
     def main(args: Array[String]) {
         scalar(CONFIG) {
-            // Clean up caches on all nodes before run.
-            cache$(NAME).get.clear()
-
-            var keys = Seq.empty[String]
-
-            ('A' to 'Z').foreach(keys :+= _.toString)
-
-            populateCache(ignite$, keys)
-
-            // Map all keys to nodes.
-            val mappings = ignite$.cluster().mapKeysToNodes(NAME, keys)
-
-            mappings.foreach(mapping => {
-                val node = mapping._1
-                val mappedKeys = mapping._2
-
-                if (node != null) {
-                    ignite$.cluster().forNode(node) *< (() => {
-                        breakable {
-                            println(">>> Executing affinity job for keys: " + mappedKeys)
-
-                            // Get cache.
-                            val cache = cache$[String, String](NAME)
-
-                            // If cache is not defined at this point then it means that
-                            // job was not routed by affinity.
-                            if (!cache.isDefined)
-                                println(">>> Cache not found [nodeId=" + ignite$.cluster().localNode().id()
+
-                                    ", cacheName=" + NAME + ']').^^
-
-                            // Check cache without loading the value.
-                            mappedKeys.foreach(key => println(">>> Peeked at:
" + cache.get.localPeek(key)))
-                        }
-                    }, null)
-                }
-            })
+            val cache = createCache$[String, String](NAME)
+
+            try {
+                var keys = Seq.empty[String]
+
+                ('A' to 'Z').foreach(keys :+= _.toString)
+
+                populateCache(ignite$, keys)
+
+                // Map all keys to nodes.
+                val mappings = ignite$.cluster().mapKeysToNodes(NAME, keys)
+
+                mappings.foreach(mapping => {
+                    val node = mapping._1
+                    val mappedKeys = mapping._2
+
+                    if (node != null) {
+                        ignite$.cluster().forNode(node) *<(() => {
+                            breakable {
+                                println(">>> Executing affinity job for keys: "
+ mappedKeys)
+
+                                // Get cache.
+                                val cache = cache$[String, String](NAME)
+
+                                // If cache is not defined at this point then it means that
+                                // job was not routed by affinity.
+                                if (!cache.isDefined)
+                                    println(">>> Cache not found [nodeId=" + ignite$.cluster().localNode().id()
+
+                                        ", cacheName=" + NAME + ']').^^
+
+                                // Check cache without loading the value.
+                                mappedKeys.foreach(key => println(">>> Peeked
at: " + cache.get.localPeek(key)))
+                            }
+                        }, null)
+                    }
+                })
+            }
+            finally {
+                cache.close()
+            }
         }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinitySimpleExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinitySimpleExample.scala
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinitySimpleExample.scala
index eff4fda..ce6dd72 100644
--- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinitySimpleExample.scala
+++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinitySimpleExample.scala
@@ -36,12 +36,15 @@ import org.apache.ignite.scalar.scalar._
  * be started with or without cache.
  */
 object ScalarCacheAffinitySimpleExample extends App {
+    /** Configuration file name. */
+    private val CONFIG = "examples/config/example-compute.xml"
+
+    /** Name of cache. */
+    private val NAME = ScalarCacheAffinitySimpleExample.getClass.getSimpleName
+
     /** Number of keys. */
     private val KEY_CNT = 20
 
-    /** Name of cache specified in spring configuration. */
-    private val NAME = "partitioned"
-
     /** Type alias. */
     type Cache = IgniteCache[Int, String]
 
@@ -49,14 +52,16 @@ object ScalarCacheAffinitySimpleExample extends App {
      * Note that in case of `LOCAL` configuration,
      * since there is no distribution, values may come back as `nulls`.
      */
-    scalar("examples/config/example-cache.xml") {
-        // Clean up caches on all nodes before run.
-        cache$(NAME).get.clear()
-
-        val c = ignite$.jcache[Int, String](NAME)
+    scalar(CONFIG) {
+        val cache = createCache$[Int, String](NAME)
 
-        populate(c)
-        visit(c)
+        try {
+            populate(cache)
+            visit(cache)
+        }
+        finally {
+            cache.close()
+        }
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
index e834da3..97424e2 100644
--- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
+++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
@@ -33,16 +33,23 @@ import scala.collection.JavaConversions._
  * be started with or without cache.
  */
 object ScalarCacheExample extends App {
+    /** Configuration file name. */
+    private val CONFIG = "examples/config/example-compute.xml"
+
     /** Name of cache specified in spring configuration. */
-    private val NAME = "partitioned"
+    private val NAME = ScalarCacheExample.getClass.getSimpleName
 
-    scalar("examples/config/example-cache.xml") {
-        // Clean up caches on all nodes before run.
-        cache$(NAME).get.clear()
+    scalar(CONFIG) {
+        val cache = createCache$[String, Int](NAME)
 
-        registerListener()
+        try {
+            registerListener()
 
-        basicOperations()
+            basicOperations()
+        }
+        finally {
+            cache.close()
+        }
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
index 2a5b8c6..094f452 100644
--- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
+++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
@@ -17,9 +17,7 @@
 
 package org.apache.ignite.scalar.examples
 
-import java.util.Timer
-
-import org.apache.ignite.IgniteException
+import org.apache.ignite.cache.query.SqlFieldsQuery
 import org.apache.ignite.examples.ExampleNodeStartup
 import org.apache.ignite.internal.util.scala.impl
 import org.apache.ignite.scalar.scalar
@@ -27,6 +25,7 @@ import org.apache.ignite.scalar.scalar._
 import org.apache.ignite.{IgniteCache, IgniteDataStreamer, IgniteException}
 
 import javax.cache.processor.{EntryProcessor, MutableEntry}
+import java.lang.{Integer => JavaInt, Long => JavaLong}
 import java.util
 import java.util.Map.Entry
 import java.util.Timer
@@ -47,8 +46,11 @@ import scala.util.Random
  * an overall top `10` list within the ignite.
  */
 object ScalarCachePopularNumbersExample extends App {
+    /** Configuration file name. */
+    private val CONFIG = "examples/config/example-compute.xml"
+
     /** Cache name. */
-    private final val CACHE_NAME = "partitioned"
+    private final val NAME = ScalarCachePopularNumbersExample.getClass.getSimpleName
 
     /** Count of most popular numbers to retrieve from cluster. */
     private final val POPULAR_NUMBERS_CNT = 10
@@ -62,36 +64,40 @@ object ScalarCachePopularNumbersExample extends App {
     /** Count of total numbers to generate. */
     private final val CNT = 1000000
 
-    scalar("examples/config/example-cache.xml") {
-        // Clean up caches on all nodes before run.
-        cache$(CACHE_NAME).get.clear()
+    scalar(CONFIG) {
+        val cache = createCache$[JavaInt, JavaLong](NAME, indexedTypes = Seq(classOf[JavaInt],
classOf[JavaLong]))
 
         println()
         println(">>> Cache popular numbers example started.")
 
-        val prj = ignite$.cluster().forCacheNodes(CACHE_NAME)
+        try {
+            val prj = ignite$.cluster().forCacheNodes(NAME)
 
-        if (prj.nodes().isEmpty)
-            println("Ignite does not have cache configured: " + CACHE_NAME)
-        else {
-            val popularNumbersQryTimer = new Timer("numbers-query-worker")
+            if (prj.nodes().isEmpty)
+                println("Ignite does not have cache configured: " + NAME)
+            else {
+                val popularNumbersQryTimer = new Timer("numbers-query-worker")
 
-            try {
-                // Schedule queries to run every 3 seconds during populates cache phase.
-                popularNumbersQryTimer.schedule(timerTask(query(POPULAR_NUMBERS_CNT)), 3000,
3000)
+                try {
+                    // Schedule queries to run every 3 seconds during populates cache phase.
+                    popularNumbersQryTimer.schedule(timerTask(query(POPULAR_NUMBERS_CNT)),
3000, 3000)
 
-                streamData()
+                    streamData()
 
-                // Force one more run to get final counts.
-                query(POPULAR_NUMBERS_CNT)
+                    // Force one more run to get final counts.
+                    query(POPULAR_NUMBERS_CNT)
 
-                // Clean up caches on all nodes after run.
-                ignite$.cluster().forCacheNodes(CACHE_NAME).bcastRun(() => ignite$.jcache(CACHE_NAME).clear(),
null)
-            }
-            finally {
-                popularNumbersQryTimer.cancel()
+                    // Clean up caches on all nodes after run.
+                    ignite$.cluster().forCacheNodes(NAME).bcastRun(() => ignite$.jcache(NAME).clear(),
null)
+                }
+                finally {
+                    popularNumbersQryTimer.cancel()
+                }
             }
         }
+        finally {
+            cache.close()
+        }
     }
 
     /**
@@ -102,7 +108,7 @@ object ScalarCachePopularNumbersExample extends App {
     def streamData() {
         // Set larger per-node buffer size since our state is relatively small.
         // Reduce parallel operations since we running the whole ignite cluster locally under
heavy load.
-        val smtr = dataStreamer$[Int, Long](CACHE_NAME, 2048)
+        val smtr = dataStreamer$[JavaInt, JavaLong](NAME, 2048)
 
         smtr.updater(new IncrementingUpdater())
 
@@ -117,8 +123,8 @@ object ScalarCachePopularNumbersExample extends App {
      * @param cnt Number of most popular numbers to return.
      */
     def query(cnt: Int) {
-        val results = cache$[Int, Long](CACHE_NAME).get
-            .sqlFields(clause = "select _key, _val from Long order by _val desc, _key limit
" + cnt)
+        val results = cache$[JavaInt, JavaLong](NAME).get
+            .queryFields(new SqlFieldsQuery("select _key, _val from Long order by _val desc,
_key limit " + cnt))
             .getAll
 
         results.foreach(res => println(res.get(0) + "=" + res.get(1)))
@@ -129,17 +135,19 @@ object ScalarCachePopularNumbersExample extends App {
     /**
      * Increments value for key.
      */
-    private class IncrementingUpdater extends IgniteDataStreamer.Updater[Int, Long] {
-        private[this] final val INC = new EntryProcessor[Int, Long, Object]() {
+    private class IncrementingUpdater extends IgniteDataStreamer.Updater[JavaInt, JavaLong]
{
+        private[this] final val INC = new EntryProcessor[JavaInt, JavaLong, Object]() {
             /** Process entries to increase value by entry key. */
-            override def process(e: MutableEntry[Int, Long], args: AnyRef*): Object = {
-                e.setValue(Option(e.getValue).map(_ + 1).getOrElse(1L))
+            override def process(e: MutableEntry[JavaInt, JavaLong], args: AnyRef*): Object
= {
+                e.setValue(Option(e.getValue)
+                    .map(l => JavaLong.valueOf(l + 1))
+                    .getOrElse(JavaLong.valueOf(1L)))
 
                 null
             }
         }
 
-        @impl def update(cache: IgniteCache[Int, Long], entries: util.Collection[Entry[Int,
Long]]) {
+        @impl def update(cache: IgniteCache[JavaInt, JavaLong], entries: util.Collection[Entry[JavaInt,
JavaLong]]) {
             entries.foreach(entry => cache.invoke(entry.getKey, INC))
         }
     }

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
index 9de8954..a5da9d7 100644
--- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
+++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
@@ -37,8 +37,11 @@ import collection.JavaConversions._
  * be started with or without cache.
  */
 object ScalarCacheQueryExample {
+    /** Configuration file name. */
+    private val CONFIG = "examples/config/example-compute.xml"
+
     /** Cache name. */
-    private val CACHE_NAME = "partitioned" // "replicated"
+    private val NAME = ScalarCacheQueryExample.getClass.getSimpleName
 
     /**
      * Example entry point. No arguments required.
@@ -46,8 +49,16 @@ object ScalarCacheQueryExample {
      * @param args Command line arguments. None required.
      */
     def main(args: Array[String]) {
-        scalar("examples/config/example-cache.xml") {
-            example(ignite$)
+        scalar(CONFIG) {
+            val cache = createCache$(NAME, indexedTypes = Seq(classOf[UUID], classOf[Organization],
+                classOf[CacheAffinityKey[_]], classOf[Person]))
+
+            try {
+                example(ignite$)
+            }
+            finally {
+                cache.close()
+            }
         }
     }
 
@@ -85,14 +96,14 @@ object ScalarCacheQueryExample {
      *
      * @return Cache to use.
      */
-    private def mkCache[K, V]: IgniteCache[K, V] = cache$[K, V](CACHE_NAME).get
+    private def mkCache[K, V]: IgniteCache[K, V] = cache$[K, V](NAME).get
 
     /**
      * Populates cache with test data.
      */
     private def initialize() {
         // Clean up caches on all nodes before run.
-        cache$(CACHE_NAME).get.clear()
+        cache$(NAME).get.clear()
 
         // Organization cache projection.
         val orgCache = mkCache[UUID, Organization]

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarSnowflakeSchemaExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarSnowflakeSchemaExample.scala
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarSnowflakeSchemaExample.scala
index 8e7e434..46176bf 100644
--- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarSnowflakeSchemaExample.scala
+++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarSnowflakeSchemaExample.scala
@@ -17,15 +17,17 @@
 
 package org.apache.ignite.scalar.examples
 
+import org.apache.ignite.IgniteCache
+import org.apache.ignite.cache.CacheMode
 import org.apache.ignite.scalar.scalar
 import org.apache.ignite.scalar.scalar._
-
 import org.jdk8.backport.ThreadLocalRandom8
 
 import javax.cache.Cache
+import java.lang.{Integer => JavaInt}
 import java.util.ConcurrentModificationException
 
-import collection.JavaConversions._
+import scala.collection.JavaConversions._
 
 /**
  * <a href="http://en.wikipedia.org/wiki/Snowflake_schema">Snowflake Schema</a>
is a logical
@@ -47,35 +49,49 @@ import collection.JavaConversions._
  * cache: `'ignite.sh examples/config/example-cache.xml'`.
  */
 object ScalarSnowflakeSchemaExample {
+    /** Configuration file name. */
+    private val CONFIG = "examples/config/example-compute.xml"
+
     /** Name of replicated cache specified in spring configuration. */
-    private val REPL_CACHE_NAME = "replicated"
+    private val REPL_NAME = "ScalarSnowflakeSchemaExampleReplicated"
 
     /** Name of partitioned cache specified in spring configuration. */
-    private val PART_CACHE_NAME = "partitioned"
+    private val PART_NAME = "ScalarSnowflakeSchemaExamplePartitioned"
 
     /** ID generator. */
-    private[this] val idGen = Stream.from(0).iterator
+    private[this] val idGen = Stream.from(System.currentTimeMillis.toInt).iterator
 
     /** DimStore data. */
-    private[this] val dataStore = scala.collection.mutable.Map[Integer, DimStore]()
+    private[this] val dataStore = scala.collection.mutable.Map[JavaInt, DimStore]()
 
     /** DimProduct data. */
-    private[this] val dataProduct = scala.collection.mutable.Map[Integer, DimProduct]()
+    private[this] val dataProduct = scala.collection.mutable.Map[JavaInt, DimProduct]()
 
     /**
      * Example entry point. No arguments required.
      */
     def main(args: Array[String]) {
-        scalar("examples/config/example-cache.xml") {
-            // Clean up caches on all nodes before run.
-            cache$(REPL_CACHE_NAME).get.clear()
-            cache$(PART_CACHE_NAME).get.clear()
-
-            populateDimensions()
-            populateFacts()
-
-            queryStorePurchases()
-            queryProductPurchases()
+        scalar(CONFIG) {
+            val dimCache = createCache$[JavaInt, AnyRef](REPL_NAME, CacheMode.REPLICATED,
Seq(classOf[JavaInt], classOf[DimStore],
+                classOf[JavaInt], classOf[DimProduct]))
+
+            try {
+                val factCache = createCache$[JavaInt, FactPurchase](PART_NAME, indexedTypes
= Seq(classOf[JavaInt], classOf[FactPurchase]))
+
+                try {
+                    populateDimensions(dimCache)
+                    populateFacts(factCache)
+
+                    queryStorePurchases()
+                    queryProductPurchases()
+                }
+                finally {
+                    factCache.close()
+                }
+            }
+            finally {
+                dimCache.close()
+            }
         }
     }
 
@@ -83,9 +99,7 @@ object ScalarSnowflakeSchemaExample {
      * Populate cache with `dimensions` which in our case are
      * `DimStore` and `DimProduct` instances.
      */
-    def populateDimensions() {
-        val dimCache = ignite$.jcache[Int, Object](REPL_CACHE_NAME)
-
+    def populateDimensions(dimCache: IgniteCache[JavaInt, AnyRef]) {
         val store1 = new DimStore(idGen.next(), "Store1", "12345", "321 Chilly Dr, NY")
         val store2 = new DimStore(idGen.next(), "Store2", "54321", "123 Windy Dr, San Francisco")
 
@@ -108,10 +122,7 @@ object ScalarSnowflakeSchemaExample {
     /**
      * Populate cache with `facts`, which in our case are `FactPurchase` objects.
      */
-    def populateFacts() {
-        val dimCache = ignite$.jcache[Int, Object](REPL_CACHE_NAME)
-        val factCache = ignite$.jcache[Int, FactPurchase](PART_CACHE_NAME)
-
+    def populateFacts(factCache: IgniteCache[JavaInt, FactPurchase]) {
         for (i <- 1 to 100) {
             val store: DimStore = rand(dataStore.values)
             val prod: DimProduct = rand(dataProduct.values)
@@ -127,10 +138,10 @@ object ScalarSnowflakeSchemaExample {
      * `FactPurchase` objects stored in `partitioned` cache.
      */
     def queryStorePurchases() {
-        val factCache = ignite$.jcache[Int, FactPurchase](PART_CACHE_NAME)
+        val factCache = ignite$.jcache[JavaInt, FactPurchase](PART_NAME)
 
         val storePurchases = factCache.sql(
-            "from \"replicated\".DimStore, \"partitioned\".FactPurchase " +
+            "from \"" + REPL_NAME + "\".DimStore, \"" + PART_NAME + "\".FactPurchase " +
             "where DimStore.id=FactPurchase.storeId and DimStore.name=?", "Store1")
 
         printQueryResults("All purchases made at store1:", storePurchases.getAll)
@@ -143,7 +154,7 @@ object ScalarSnowflakeSchemaExample {
      * stored in `partitioned` cache.
      */
     private def queryProductPurchases() {
-        val factCache = ignite$.jcache[Int, FactPurchase](PART_CACHE_NAME)
+        val factCache = ignite$.jcache[JavaInt, FactPurchase](PART_NAME)
 
         // All purchases for certain product made at store2.
         // =================================================
@@ -154,7 +165,8 @@ object ScalarSnowflakeSchemaExample {
         println("IDs of products [p1=" + p1.id + ", p2=" + p2.id + ", p3=" + p3.id + ']')
 
         val prodPurchases = factCache.sql(
-            "from \"replicated\".DimStore, \"replicated\".DimProduct, \"partitioned\".FactPurchase
" +
+            "from \"" + REPL_NAME + "\".DimStore, \"" + REPL_NAME + "\".DimProduct, \"" +
+                PART_NAME + "\".FactPurchase " +
             "where DimStore.id=FactPurchase.storeId and " +
                 "DimProduct.id=FactPurchase.productId and " +
                 "DimStore.name=? and DimProduct.id in(?, ?, ?)",
@@ -169,7 +181,7 @@ object ScalarSnowflakeSchemaExample {
      * @param msg Initial message.
      * @param res Results to print.
      */
-    private def printQueryResults[V](msg: String, res: Iterable[Cache.Entry[Int, V]]) {
+    private def printQueryResults[V](msg: String, res: Iterable[Cache.Entry[JavaInt, V]])
{
         println(msg)
 
         for (e <- res)

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7137c3c/modules/scalar/src/main/scala/org/apache/ignite/scalar/scalar.scala
----------------------------------------------------------------------
diff --git a/modules/scalar/src/main/scala/org/apache/ignite/scalar/scalar.scala b/modules/scalar/src/main/scala/org/apache/ignite/scalar/scalar.scala
index dc54c1b..ae2a737 100644
--- a/modules/scalar/src/main/scala/org/apache/ignite/scalar/scalar.scala
+++ b/modules/scalar/src/main/scala/org/apache/ignite/scalar/scalar.scala
@@ -17,15 +17,11 @@
 
 package org.apache.ignite.scalar
 
-import org.apache.ignite.cache.GridCache
-import org.apache.ignite.cache.query.annotations.{QuerySqlField, QueryTextField}
-import java.net.URL
-import java.util.UUID
-
 import org.apache.ignite._
+import org.apache.ignite.cache.CacheMode
 import org.apache.ignite.cache.query.annotations.{QuerySqlField, QueryTextField}
 import org.apache.ignite.cluster.ClusterNode
-import org.apache.ignite.configuration.IgniteConfiguration
+import org.apache.ignite.configuration.{CacheConfiguration, IgniteConfiguration}
 import org.apache.ignite.internal.IgniteVersionUtils._
 import org.jetbrains.annotations.Nullable
 
@@ -279,6 +275,22 @@ object scalar extends ScalarConversions {
         Option(Ignition.ignite.jcache(cacheName))
 
     /**
 +     * Creates a cache with the specified parameters in the default grid.
+     *
 +     * @param cacheName Name of the cache to create.
+     */
+    @inline def createCache$[K, V](@Nullable cacheName: String, cacheMode: CacheMode = CacheMode.PARTITIONED,
+        indexedTypes: Seq[Class[_]] = Seq.empty): IgniteCache[K, V] = {
+        val cfg = new CacheConfiguration[K, V]()
+
+        cfg.setName(cacheName)
+        cfg.setCacheMode(cacheMode)
+        cfg.setIndexedTypes(indexedTypes:_*)
+
+        Ignition.ignite.createCache(cfg)
+    }
+
+    /**
      * Gets named cache from specified grid.
      *
      * @param gridName Name of the grid.


Mime
View raw message