lens-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From amareshw...@apache.org
Subject incubator-lens git commit: LENS-406 : CubeMetastoreClient.PartitionTimeline.get should be synchronized ( Rajat Khandelwal via amareshwari)
Date Fri, 13 Mar 2015 10:11:59 GMT
Repository: incubator-lens
Updated Branches:
  refs/heads/master afc440005 -> 6a5e5111c


LENS-406 : CubeMetastoreClient.PartitionTimeline.get should be synchronized (Rajat Khandelwal
via amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/incubator-lens/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-lens/commit/6a5e5111
Tree: http://git-wip-us.apache.org/repos/asf/incubator-lens/tree/6a5e5111
Diff: http://git-wip-us.apache.org/repos/asf/incubator-lens/diff/6a5e5111

Branch: refs/heads/master
Commit: 6a5e5111c84f54c8abef76fb08e190c20589f752
Parents: afc4400
Author: Rajat Khandelwal <prongs@apache.org>
Authored: Fri Mar 13 15:40:52 2015 +0530
Committer: Amareshwari Sriramadasu <amareshwari@apache.org>
Committed: Fri Mar 13 15:40:52 2015 +0530

----------------------------------------------------------------------
 .../lens/cube/metadata/CubeMetastoreClient.java | 74 +++++++++++---------
 1 file changed, 40 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/6a5e5111/lens-cube/src/main/java/org/apache/lens/cube/metadata/CubeMetastoreClient.java
----------------------------------------------------------------------
diff --git a/lens-cube/src/main/java/org/apache/lens/cube/metadata/CubeMetastoreClient.java
b/lens-cube/src/main/java/org/apache/lens/cube/metadata/CubeMetastoreClient.java
index 8797bfe..10e8ec0 100644
--- a/lens-cube/src/main/java/org/apache/lens/cube/metadata/CubeMetastoreClient.java
+++ b/lens-cube/src/main/java/org/apache/lens/cube/metadata/CubeMetastoreClient.java
@@ -170,50 +170,56 @@ public class CubeMetastoreClient {
      */
     public TreeMap<UpdatePeriod, CaseInsensitiveStringHashMap<PartitionTimeline>>
get(String fact, String storage)
       throws HiveException, LensException {
+      // SUSPEND CHECKSTYLE CHECK DoubleCheckedLockingCheck
       String storageTableName = MetastoreUtil.getStorageTableName(fact, Storage.getPrefix(storage));
       if (get(storageTableName) == null) {
-        log.info("loading timeline from all partitions for storage table: " + storageTableName);
-        // not found in memory, try loading from table properties.
-        Table storageTable = getTable(storageTableName);
-        if (storageTable.getParameters().get(MetastoreUtil.getPartitoinTimelineCachePresenceKey())
== null) {
-          // Not found in table properties either, compute from all partitions of the fact-storage
table.
-          // First make sure all combinations of update period and partition column have
an entry even
-          // if no partitions exist
-          if (getCubeFact(fact).getUpdatePeriods() != null && getCubeFact(fact).getUpdatePeriods().get(
-            storage) != null) {
-            for (UpdatePeriod updatePeriod : getCubeFact(fact).getUpdatePeriods().get(storage))
{
-              for (String partCol : getTimePartsOfTable(storageTable)) {
-                partitionTimelineCache.ensureEntry(storageTableName, updatePeriod, partCol);
+        synchronized (this) {
+          if (get(storageTableName) == null) {
+            log.info("loading timeline from all partitions for storage table: " + storageTableName);
+            // not found in memory, try loading from table properties.
+            Table storageTable = getTable(storageTableName);
+            if (storageTable.getParameters().get(MetastoreUtil.getPartitoinTimelineCachePresenceKey())
== null) {
+              // Not found in table properties either, compute from all partitions of the
fact-storage table.
+              // First make sure all combinations of update period and partition column have
an entry even
+              // if no partitions exist
+              if (getCubeFact(fact).getUpdatePeriods() != null && getCubeFact(fact).getUpdatePeriods().get(
+                storage) != null) {
+                for (UpdatePeriod updatePeriod : getCubeFact(fact).getUpdatePeriods().get(storage))
{
+                  for (String partCol : getTimePartsOfTable(storageTable)) {
+                    partitionTimelineCache.ensureEntry(storageTableName, updatePeriod, partCol);
+                  }
+                }
               }
-            }
-          }
-          // Then add all existing partitions for batch addition in respective timelines.
-          List<String> timeParts = getTimePartsOfTable(storageTable);
-          List<FieldSchema> partCols = storageTable.getPartCols();
-          for (Partition partition : getPartitionsByFilter(storageTableName, null)) {
-            UpdatePeriod period = deduceUpdatePeriod(partition);
-            List<String> values = partition.getValues();
-            for (int i = 0; i < partCols.size(); i++) {
-              if (timeParts.contains(partCols.get(i).getName())) {
-                partitionTimelineCache.addForBatchAddition(storageTableName, period, partCols.get(i).getName(),
-                  values.get(i));
+              // Then add all existing partitions for batch addition in respective timelines.
+              List<String> timeParts = getTimePartsOfTable(storageTable);
+              List<FieldSchema> partCols = storageTable.getPartCols();
+              for (Partition partition : getPartitionsByFilter(storageTableName, null)) {
+                UpdatePeriod period = deduceUpdatePeriod(partition);
+                List<String> values = partition.getValues();
+                for (int i = 0; i < partCols.size(); i++) {
+                  if (timeParts.contains(partCols.get(i).getName())) {
+                    partitionTimelineCache.addForBatchAddition(storageTableName, period,
partCols.get(i).getName(),
+                      values.get(i));
+                  }
+                }
+              }
+              // commit all batch additions for the storage table,
+              // which will in turn commit all batch additions in all its timelines.
+              commitAllBatchAdditions(storageTableName);
+            } else {
+              // found in table properties, load from there.
+              for (UpdatePeriod updatePeriod : getCubeFact(fact).getUpdatePeriods().get(storage))
{
+                for (String partCol : getTimePartsOfTable(storageTableName)) {
+                  ensureEntry(storageTableName, updatePeriod, partCol).init(storageTable);
+                }
               }
-            }
-          }
-          // commit all batch additions for the storage table, which will in turn commit all
batch additions in all its
-          // timelines.
-          commitAllBatchAdditions(storageTableName);
-        } else {
-          // found in table properties, load from there.
-          for (UpdatePeriod updatePeriod : getCubeFact(fact).getUpdatePeriods().get(storage))
{
-            for (String partCol : getTimePartsOfTable(storageTableName)) {
-              ensureEntry(storageTableName, updatePeriod, partCol).init(storageTable);
             }
           }
         }
       }
       // return the final value from memory
       return get(storageTableName);
+      // RESUME CHECKSTYLE CHECK DoubleCheckedLockingCheck
     }
 
     /**


Mime
View raw message