spark-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From r...@apache.org
Subject [2/3] git commit: Responses to review
Date Tue, 08 Oct 2013 03:46:11 GMT
Responses to review


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/8b377718
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/8b377718
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/8b377718

Branch: refs/heads/master
Commit: 8b377718b85d31fe5b0efb0ad77a8f38ffcede89
Parents: 391133f
Author: Patrick Wendell <pwendell@gmail.com>
Authored: Mon Oct 7 20:03:35 2013 -0700
Committer: Patrick Wendell <pwendell@gmail.com>
Committed: Mon Oct 7 20:03:35 2013 -0700

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/CacheManager.scala | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/8b377718/core/src/main/scala/org/apache/spark/CacheManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/CacheManager.scala b/core/src/main/scala/org/apache/spark/CacheManager.scala
index 048168c..4cf7eb9 100644
--- a/core/src/main/scala/org/apache/spark/CacheManager.scala
+++ b/core/src/main/scala/org/apache/spark/CacheManager.scala
@@ -34,7 +34,7 @@ private[spark] class CacheManager(blockManager: BlockManager) extends Logging {
   def getOrCompute[T](rdd: RDD[T], split: Partition, context: TaskContext, storageLevel: StorageLevel)
       : Iterator[T] = {
     val key = "rdd_%d_%d".format(rdd.id, split.index)
-    logInfo("Looking for partition " + key)
+    logDebug("Looking for partition " + key)
     blockManager.get(key) match {
       case Some(values) =>
         // Partition is already materialized, so just return its values
@@ -44,11 +44,11 @@ private[spark] class CacheManager(blockManager: BlockManager) extends Logging {
         // Mark the split as loading (unless someone else marks it first)
         loading.synchronized {
           if (loading.contains(key)) {
-            logInfo("Loading contains " + key + ", waiting...")
+            logInfo("Another thread is loading %s, waiting for it to finish...".format(key))
             while (loading.contains(key)) {
               try {loading.wait()} catch {case _ : Throwable =>}
             }
-            logInfo("Loading no longer contains " + key + ", so returning cached result")
+            logInfo("Finished waiting for %s".format(key))
             // See whether someone else has successfully loaded it. The main way this would fail
             // is for the RDD-level cache eviction policy if someone else has loaded the same RDD
             // partition but we didn't want to make space for it. However, that case is unlikely
@@ -58,7 +58,7 @@ private[spark] class CacheManager(blockManager: BlockManager) extends Logging {
               case Some(values) =>
                 return values.asInstanceOf[Iterator[T]]
               case None =>
-                logInfo("Whoever was loading " + key + " failed; we'll try it ourselves")
+                logInfo("Whoever was loading %s failed; we'll try it ourselves".format(key))
                 loading.add(key)
             }
           } else {


Mime
View raw message