Return-Path:
X-Original-To: archive-asf-public-internal@cust-asf2.ponee.io
Delivered-To: archive-asf-public-internal@cust-asf2.ponee.io
Received: from cust-asf.ponee.io (cust-asf.ponee.io [163.172.22.183]) by cust-asf2.ponee.io (Postfix) with ESMTP id 30F722004F1 for ; Wed, 30 Aug 2017 17:13:58 +0200 (CEST)
Received: by cust-asf.ponee.io (Postfix) id 2F4C41693FB; Wed, 30 Aug 2017 15:13:58 +0000 (UTC)
Delivered-To: archive-asf-public@cust-asf.ponee.io
Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by cust-asf.ponee.io (Postfix) with SMTP id 120641693E2 for ; Wed, 30 Aug 2017 17:13:55 +0200 (CEST)
Received: (qmail 46460 invoked by uid 500); 30 Aug 2017 15:13:51 -0000
Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm
Precedence: bulk
List-Help:
List-Unsubscribe:
List-Post:
List-Id:
Reply-To: dev@hbase.apache.org
Delivered-To: mailing list commits@hbase.apache.org
Received: (qmail 44491 invoked by uid 99); 30 Aug 2017 15:13:50 -0000
Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 30 Aug 2017 15:13:50 +0000
Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 9F54BF557F; Wed, 30 Aug 2017 15:13:47 +0000 (UTC)
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: git-site-role@apache.org
To: commits@hbase.apache.org
Date: Wed, 30 Aug 2017 15:13:51 -0000
Message-Id: <453f5be36aad48f2aad1c66710fe3b85@git.apache.org>
In-Reply-To:
References:
X-Mailer: ASF-Git Admin Mailer
Subject: [05/51] [partial] hbase-site git commit: Published site at .
archived-at: Wed, 30 Aug 2017 15:13:58 -0000

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0d6dd914/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index c234c4c..13adb57 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -729,7481 +729,7480 @@
 721 this.baseConf = confParam;
 722 this.conf = new CompoundConfiguration()
 723 .add(confParam)
-724 .addStringMap(htd.getConfiguration())
-725 .addBytesMap(htd.getValues());
-726 this.flushCheckInterval = conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL,
-727 DEFAULT_CACHE_FLUSH_INTERVAL);
-728 this.flushPerChanges = conf.getLong(MEMSTORE_FLUSH_PER_CHANGES, DEFAULT_FLUSH_PER_CHANGES);
-729 if (this.flushPerChanges > MAX_FLUSH_PER_CHANGES) {
-730 throw new IllegalArgumentException(MEMSTORE_FLUSH_PER_CHANGES + " can not exceed "
-731 + MAX_FLUSH_PER_CHANGES);
-732 }
-733 this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration",
-734 DEFAULT_ROWLOCK_WAIT_DURATION);
-735
-736 this.isLoadingCfsOnDemandDefault = conf.getBoolean(LOAD_CFS_ON_DEMAND_CONFIG_KEY, true);
-737 this.htableDescriptor = htd;
-738 Set<byte[]> families = this.htableDescriptor.getColumnFamilyNames();
-739 for (byte[] family : families) {
-740 if (!replicationScope.containsKey(family)) {
-741 int scope = htd.getColumnFamily(family).getScope();
-742 // Only store those families that has NON-DEFAULT scope
-743 if (scope != REPLICATION_SCOPE_LOCAL) {
-744 // Do a copy before storing it
here. -745 replicationScope.put(Bytes.copy(family), scope); -746 } -747 } -748 } -749 this.rsServices = rsServices; -750 this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); -751 setHTableSpecificConf(); -752 this.scannerReadPoints = new ConcurrentHashMap<>(); -753 -754 this.busyWaitDuration = conf.getLong( -755 "hbase.busy.wait.duration", DEFAULT_BUSY_WAIT_DURATION); -756 this.maxBusyWaitMultiplier = conf.getInt("hbase.busy.wait.multiplier.max", 2); -757 if (busyWaitDuration * maxBusyWaitMultiplier <= 0L) { -758 throw new IllegalArgumentException("Invalid hbase.busy.wait.duration (" -759 + busyWaitDuration + ") or hbase.busy.wait.multiplier.max (" -760 + maxBusyWaitMultiplier + "). Their product should be positive"); -761 } -762 this.maxBusyWaitDuration = conf.getLong("hbase.ipc.client.call.purge.timeout", -763 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); -764 -765 /* -766 * timestamp.slop provides a server-side constraint on the timestamp. This -767 * assumes that you base your TS around currentTimeMillis(). In this case, -768 * throw an error to the user if the user-specified TS is newer than now + -769 * slop. LATEST_TIMESTAMP == don't use this functionality -770 */ -771 this.timestampSlop = conf.getLong( -772 "hbase.hregion.keyvalue.timestamp.slop.millisecs", -773 HConstants.LATEST_TIMESTAMP); -774 -775 /** -776 * Timeout for the process time in processRowsWithLocks(). -777 * Use -1 to switch off time bound. -778 */ -779 this.rowProcessorTimeout = conf.getLong( -780 "hbase.hregion.row.processor.timeout", DEFAULT_ROW_PROCESSOR_TIMEOUT); -781 this.durability = htd.getDurability() == Durability.USE_DEFAULT -782 ? DEFAULT_DURABILITY -783 : htd.getDurability(); -784 if (rsServices != null) { -785 this.rsAccounting = this.rsServices.getRegionServerAccounting(); -786 // don't initialize coprocessors if not running within a regionserver -787 // TODO: revisit if coprocessors should load in other cases -788 this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, conf); -789 this.metricsRegionWrapper = new MetricsRegionWrapperImpl(this); -790 this.metricsRegion = new MetricsRegion(this.metricsRegionWrapper); -791 -792 Map<String, Region> recoveringRegions = rsServices.getRecoveringRegions(); -793 String encodedName = getRegionInfo().getEncodedName(); -794 if (recoveringRegions != null && recoveringRegions.containsKey(encodedName)) { -795 this.recovering = true; -796 recoveringRegions.put(encodedName, this); -797 } -798 } else { -799 this.metricsRegionWrapper = null; -800 this.metricsRegion = null; -801 } -802 if (LOG.isDebugEnabled()) { -803 // Write out region name as string and its encoded name. -804 LOG.debug("Instantiated " + this); -805 } -806 -807 // by default, we allow writes against a region when it's in recovering -808 this.disallowWritesInRecovering = -809 conf.getBoolean(HConstants.DISALLOW_WRITES_IN_RECOVERING, -810 HConstants.DEFAULT_DISALLOW_WRITES_IN_RECOVERING_CONFIG); -811 configurationManager = Optional.absent(); -812 -813 // disable stats tracking system tables, but check the config for everything else -814 this.regionStatsEnabled = htd.getTableName().getNamespaceAsString().equals( -815 NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) ? 
-816 false : -817 conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, -818 HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE); -819 -820 this.maxCellSize = conf.getLong(HBASE_MAX_CELL_SIZE_KEY, DEFAULT_MAX_CELL_SIZE); -821 -822 boolean unassignForFNFE = -823 conf.getBoolean(HREGION_UNASSIGN_FOR_FNFE, DEFAULT_HREGION_UNASSIGN_FOR_FNFE); -824 if (unassignForFNFE) { -825 this.regionUnassigner = new RegionUnassigner(rsServices, fs.getRegionInfo()); -826 } else { -827 this.regionUnassigner = null; -828 } -829 } -830 -831 void setHTableSpecificConf() { -832 if (this.htableDescriptor == null) return; -833 long flushSize = this.htableDescriptor.getMemStoreFlushSize(); -834 -835 if (flushSize <= 0) { -836 flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, -837 TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); -838 } -839 this.memstoreFlushSize = flushSize; -840 this.blockingMemStoreSize = this.memstoreFlushSize * -841 conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, -842 HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); -843 } -844 -845 /** -846 * Initialize this region. -847 * Used only by tests and SplitTransaction to reopen the region. -848 * You should use createHRegion() or openHRegion() -849 * @return What the next sequence (edit) id should be. -850 * @throws IOException e -851 * @deprecated use HRegion.createHRegion() or HRegion.openHRegion() -852 */ -853 @Deprecated -854 public long initialize() throws IOException { -855 return initialize(null); -856 } -857 -858 /** -859 * Initialize this region. -860 * -861 * @param reporter Tickle every so often if initialize is taking a while. -862 * @return What the next sequence (edit) id should be. -863 * @throws IOException e -864 */ -865 private long initialize(final CancelableProgressable reporter) throws IOException { -866 -867 //Refuse to open the region if there is no column family in the table -868 if (htableDescriptor.getColumnFamilyCount() == 0) { -869 throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+ -870 " should have at least one column family."); -871 } -872 -873 MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this); -874 long nextSeqId = -1; -875 try { -876 nextSeqId = initializeRegionInternals(reporter, status); -877 return nextSeqId; -878 } finally { -879 // nextSeqid will be -1 if the initialization fails. -880 // At least it will be 0 otherwise. 
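// A minimal, self-contained sketch (plain Java, not HBase API) of the flush-size resolution
// performed in setHTableSpecificConf() above: a non-positive table-level memstore flush size
// falls back to the cluster-wide "hbase.hregion.memstore.flush.size", and writes block once
// the memstore reaches flushSize * "hbase.hregion.memstore.block.multiplier".
// The concrete numbers below are illustrative inputs, not asserted defaults of any release.
final class FlushSizeMathSketch {
  /** Returns {effectiveFlushSize, blockingSize} for the given (hypothetical) inputs. */
  static long[] resolve(long tableFlushSize, long configFlushSize, long blockMultiplier) {
    long flushSize = tableFlushSize > 0 ? tableFlushSize : configFlushSize;
    return new long[] { flushSize, flushSize * blockMultiplier };
  }

  public static void main(String[] args) {
    // Table descriptor leaves the flush size unset (-1); assume a 128 MB cluster-wide value
    // and a block multiplier of 4 for the sake of the example.
    long[] r = resolve(-1L, 128L * 1024 * 1024, 4L);
    System.out.println("flush at " + r[0] + " bytes, block writes at " + r[1] + " bytes");
  }
}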
-881 if (nextSeqId == -1) { -882 status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() + -883 " initialization."); -884 } -885 } -886 } -887 -888 private long initializeRegionInternals(final CancelableProgressable reporter, -889 final MonitoredTask status) throws IOException { -890 if (coprocessorHost != null) { -891 status.setStatus("Running coprocessor pre-open hook"); -892 coprocessorHost.preOpen(); -893 } -894 -895 // Write HRI to a file in case we need to recover hbase:meta -896 // Only the primary replica should write .regioninfo -897 if (this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { -898 status.setStatus("Writing region info on filesystem"); -899 fs.checkRegionInfoOnFilesystem(); -900 } else { -901 if (LOG.isDebugEnabled()) { -902 LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo()); -903 } -904 } -905 -906 // Initialize all the HStores -907 status.setStatus("Initializing all the Stores"); -908 long maxSeqId = initializeStores(reporter, status); -909 this.mvcc.advanceTo(maxSeqId); -910 if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) { -911 List<Store> stores = this.getStores(); // update the stores that we are replaying -912 try { -913 for (Store store : stores) { -914 ((HStore) store).startReplayingFromWAL(); -915 } -916 // Recover any edits if available. -917 maxSeqId = Math.max(maxSeqId, -918 replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status)); -919 // Make sure mvcc is up to max. -920 this.mvcc.advanceTo(maxSeqId); -921 } finally { -922 for (Store store : stores) { // update the stores that we are done replaying -923 ((HStore)store).stopReplayingFromWAL(); -924 } -925 } -926 -927 } -928 this.lastReplayedOpenRegionSeqId = maxSeqId; -929 -930 this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this)); -931 this.writestate.flushRequested = false; -932 this.writestate.compacting.set(0); -933 -934 if (this.writestate.writesEnabled) { -935 // Remove temporary data left over from old regions -936 status.setStatus("Cleaning up temporary data from old regions"); -937 fs.cleanupTempDir(); -938 } -939 -940 if (this.writestate.writesEnabled) { -941 status.setStatus("Cleaning up detritus from prior splits"); -942 // Get rid of any splits or merges that were lost in-progress. Clean out -943 // these directories here on open. We may be opening a region that was -944 // being split but we crashed in the middle of it all. -945 fs.cleanupAnySplitDetritus(); -946 fs.cleanupMergesDir(); -947 } -948 -949 // Initialize split policy -950 this.splitPolicy = RegionSplitPolicy.create(this, conf); -951 -952 // Initialize flush policy -953 this.flushPolicy = FlushPolicyFactory.create(this, conf); -954 -955 long lastFlushTime = EnvironmentEdgeManager.currentTime(); -956 for (Store store: stores.values()) { -957 this.lastStoreFlushTimeMap.put(store, lastFlushTime); -958 } -959 -960 // Use maximum of log sequenceid or that which was found in stores -961 // (particularly if no recovered edits, seqid will be -1). -962 long nextSeqid = maxSeqId; -963 -964 // In distributedLogReplay mode, we don't know the last change sequence number because region -965 // is opened before recovery completes. So we add a safety bumper to avoid new sequence number -966 // overlaps used sequence numbers -967 if (this.writestate.writesEnabled) { -968 nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs -969 .getRegionDir(), nextSeqid, (this.recovering ? 
(this.flushPerChanges + 10000000) : 1)); -970 } else { -971 nextSeqid++; -972 } -973 -974 LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() + -975 "; next sequenceid=" + nextSeqid); -976 -977 // A region can be reopened if failed a split; reset flags -978 this.closing.set(false); -979 this.closed.set(false); -980 -981 if (coprocessorHost != null) { -982 status.setStatus("Running coprocessor post-open hooks"); -983 coprocessorHost.postOpen(); -984 } -985 -986 status.markComplete("Region opened successfully"); -987 return nextSeqid; -988 } -989 -990 /** -991 * Open all Stores. -992 * @param reporter -993 * @param status -994 * @return Highest sequenceId found out in a Store. -995 * @throws IOException -996 */ -997 private long initializeStores(final CancelableProgressable reporter, MonitoredTask status) -998 throws IOException { -999 // Load in all the HStores. -1000 -1001 long maxSeqId = -1; -1002 // initialized to -1 so that we pick up MemstoreTS from column families -1003 long maxMemstoreTS = -1; -1004 -1005 if (htableDescriptor.getColumnFamilyCount() != 0) { -1006 // initialize the thread pool for opening stores in parallel. -1007 ThreadPoolExecutor storeOpenerThreadPool = -1008 getStoreOpenAndCloseThreadPool("StoreOpener-" + this.getRegionInfo().getShortNameToLog()); -1009 CompletionService<HStore> completionService = new ExecutorCompletionService<>(storeOpenerThreadPool); -1010 -1011 // initialize each store in parallel -1012 for (final ColumnFamilyDescriptor family : htableDescriptor.getColumnFamilies()) { -1013 status.setStatus("Instantiating store for column family " + family); -1014 completionService.submit(new Callable<HStore>() { -1015 @Override -1016 public HStore call() throws IOException { -1017 return instantiateHStore(family); -1018 } -1019 }); -1020 } -1021 boolean allStoresOpened = false; -1022 boolean hasSloppyStores = false; -1023 try { -1024 for (int i = 0; i < htableDescriptor.getColumnFamilyCount(); i++) { -1025 Future<HStore> future = completionService.take(); -1026 HStore store = future.get(); -1027 this.stores.put(store.getColumnFamilyDescriptor().getName(), store); -1028 if (store.isSloppyMemstore()) { -1029 hasSloppyStores = true; -1030 } -1031 -1032 long storeMaxSequenceId = store.getMaxSequenceId(); -1033 maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), -1034 storeMaxSequenceId); -1035 if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) { -1036 maxSeqId = storeMaxSequenceId; -1037 } -1038 long maxStoreMemstoreTS = store.getMaxMemstoreTS(); -1039 if (maxStoreMemstoreTS > maxMemstoreTS) { -1040 maxMemstoreTS = maxStoreMemstoreTS; -1041 } -1042 } -1043 allStoresOpened = true; -1044 if(hasSloppyStores) { -1045 htableDescriptor = TableDescriptorBuilder.newBuilder(htableDescriptor) -1046 .setFlushPolicyClassName(FlushNonSloppyStoresFirstPolicy.class.getName()) -1047 .build(); -1048 LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + this); -1049 } -1050 } catch (InterruptedException e) { -1051 throw (InterruptedIOException)new InterruptedIOException().initCause(e); -1052 } catch (ExecutionException e) { -1053 throw new IOException(e.getCause()); -1054 } finally { -1055 storeOpenerThreadPool.shutdownNow(); -1056 if (!allStoresOpened) { -1057 // something went wrong, close all opened stores -1058 LOG.error("Could not initialize all stores for the region=" + this); -1059 for (Store store : this.stores.values()) { -1060 try { -1061 store.close(); -1062 } catch (IOException e) { -1063 LOG.warn(e.getMessage()); -1064 } -1065 } 
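// A JDK-only sketch of the open-all-stores-in-parallel pattern used by initializeStores()
// above: one task per column family is submitted to an ExecutorCompletionService, results are
// drained with take()/get(), and the pool is always shut down in a finally block.
// The task body is a stand-in for instantiateHStore(family); the family names are made up.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class ParallelStoreOpenSketch {
  public static void main(String[] args) throws Exception {
    List<String> families = Arrays.asList("cf1", "cf2", "cf3");
    ExecutorService pool = Executors.newFixedThreadPool(families.size());
    CompletionService<String> completion = new ExecutorCompletionService<>(pool);
    List<String> opened = new ArrayList<>();
    boolean allOpened = false;
    try {
      for (String family : families) {
        completion.submit(() -> "opened store for " + family); // stand-in for real store opening
      }
      for (int i = 0; i < families.size(); i++) {
        opened.add(completion.take().get()); // futures are handed back as tasks finish
      }
      allOpened = true;
    } catch (ExecutionException e) {
      throw new RuntimeException(e.getCause()); // HRegion rethrows the cause as an IOException
    } finally {
      pool.shutdownNow();
      if (!allOpened) {
        System.err.println("could not open all stores; opened so far: " + opened);
      }
    }
    System.out.println(opened);
  }
}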
-1066 } -1067 } -1068 } -1069 return Math.max(maxSeqId, maxMemstoreTS + 1); -1070 } -1071 -1072 private void initializeWarmup(final CancelableProgressable reporter) throws IOException { -1073 MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this); -1074 // Initialize all the HStores -1075 status.setStatus("Warming up all the Stores"); -1076 try { -1077 initializeStores(reporter, status); -1078 } finally { -1079 status.markComplete("Done warming up."); -1080 } -1081 } -1082 -1083 /** -1084 * @return Map of StoreFiles by column family -1085 */ -1086 private NavigableMap<byte[], List<Path>> getStoreFiles() { -1087 NavigableMap<byte[], List<Path>> allStoreFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR); -1088 for (Store store: getStores()) { -1089 Collection<StoreFile> storeFiles = store.getStorefiles(); -1090 if (storeFiles == null) continue; -1091 List<Path> storeFileNames = new ArrayList<>(); -1092 for (StoreFile storeFile: storeFiles) { -1093 storeFileNames.add(storeFile.getPath()); -1094 } -1095 allStoreFiles.put(store.getColumnFamilyDescriptor().getName(), storeFileNames); -1096 } -1097 return allStoreFiles; -1098 } -1099 -1100 private void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException { -1101 Map<byte[], List<Path>> storeFiles = getStoreFiles(); -1102 RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor( -1103 RegionEventDescriptor.EventType.REGION_OPEN, getRegionInfo(), openSeqId, -1104 getRegionServerServices().getServerName(), storeFiles); -1105 WALUtil.writeRegionEventMarker(wal, getReplicationScope(), getRegionInfo(), regionOpenDesc, -1106 mvcc); -1107 } -1108 -1109 private void writeRegionCloseMarker(WAL wal) throws IOException { -1110 Map<byte[], List<Path>> storeFiles = getStoreFiles(); -1111 RegionEventDescriptor regionEventDesc = ProtobufUtil.toRegionEventDescriptor( -1112 RegionEventDescriptor.EventType.REGION_CLOSE, getRegionInfo(), mvcc.getReadPoint(), -1113 getRegionServerServices().getServerName(), storeFiles); -1114 WALUtil.writeRegionEventMarker(wal, getReplicationScope(), getRegionInfo(), regionEventDesc, -1115 mvcc); -1116 -1117 // Store SeqId in HDFS when a region closes -1118 // checking region folder exists is due to many tests which delete the table folder while a -1119 // table is still online -1120 if (this.fs.getFileSystem().exists(this.fs.getRegionDir())) { -1121 WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs.getRegionDir(), -1122 mvcc.getReadPoint(), 0); -1123 } -1124 } -1125 -1126 /** -1127 * @return True if this region has references. 
-1128 */ -1129 public boolean hasReferences() { -1130 for (Store store : this.stores.values()) { -1131 if (store.hasReferences()) return true; -1132 } -1133 return false; -1134 } -1135 -1136 public void blockUpdates() { -1137 this.updatesLock.writeLock().lock(); -1138 } -1139 -1140 public void unblockUpdates() { -1141 this.updatesLock.writeLock().unlock(); -1142 } -1143 -1144 @Override -1145 public HDFSBlocksDistribution getHDFSBlocksDistribution() { -1146 HDFSBlocksDistribution hdfsBlocksDistribution = -1147 new HDFSBlocksDistribution(); -1148 synchronized (this.stores) { -1149 for (Store store : this.stores.values()) { -1150 Collection<StoreFile> storeFiles = store.getStorefiles(); -1151 if (storeFiles == null) continue; -1152 for (StoreFile sf : storeFiles) { -1153 HDFSBlocksDistribution storeFileBlocksDistribution = -1154 sf.getHDFSBlockDistribution(); -1155 hdfsBlocksDistribution.add(storeFileBlocksDistribution); -1156 } -1157 } -1158 } -1159 return hdfsBlocksDistribution; -1160 } -1161 -1162 /** -1163 * This is a helper function to compute HDFS block distribution on demand -1164 * @param conf configuration -1165 * @param tableDescriptor TableDescriptor of the table -1166 * @param regionInfo encoded name of the region -1167 * @return The HDFS blocks distribution for the given region. -1168 * @throws IOException -1169 */ -1170 public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, -1171 final TableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException { -1172 Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableDescriptor.getTableName()); -1173 return computeHDFSBlocksDistribution(conf, tableDescriptor, regionInfo, tablePath); -1174 } -1175 -1176 /** -1177 * This is a helper function to compute HDFS block distribution on demand -1178 * @param conf configuration -1179 * @param tableDescriptor TableDescriptor of the table -1180 * @param regionInfo encoded name of the region -1181 * @param tablePath the table directory -1182 * @return The HDFS blocks distribution for the given region. 
-1183 * @throws IOException -1184 */ -1185 public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, -1186 final TableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath) -1187 throws IOException { -1188 HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); -1189 FileSystem fs = tablePath.getFileSystem(conf); -1190 -1191 HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo); -1192 for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) { -1193 List<LocatedFileStatus> locatedFileStatusList = HRegionFileSystem -1194 .getStoreFilesLocatedStatus(regionFs, family.getNameAsString(), true); -1195 if (locatedFileStatusList == null) { -1196 continue; -1197 } -1198 -1199 for (LocatedFileStatus status : locatedFileStatusList) { -1200 Path p = status.getPath(); -1201 if (StoreFileInfo.isReference(p) || HFileLink.isHFileLink(p)) { -1202 // Only construct StoreFileInfo object if its not a hfile, save obj -1203 // creation -1204 StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, status); -1205 hdfsBlocksDistribution.add(storeFileInfo -1206 .computeHDFSBlocksDistribution(fs)); -1207 } else if (StoreFileInfo.isHFile(p)) { -1208 // If its a HFile, then lets just add to the block distribution -1209 // lets not create more objects here, not even another HDFSBlocksDistribution -1210 FSUtils.addToHDFSBlocksDistribution(hdfsBlocksDistribution, -1211 status.getBlockLocations()); -1212 } else { -1213 throw new IOException("path=" + p -1214 + " doesn't look like a valid StoreFile"); -1215 } -1216 } -1217 } -1218 return hdfsBlocksDistribution; -1219 } -1220 -1221 /** -1222 * Increase the size of mem store in this region and the size of global mem -1223 * store -1224 * @return the size of memstore in this region -1225 */ -1226 public long addAndGetMemstoreSize(MemstoreSize memstoreSize) { -1227 if (this.rsAccounting != null) { -1228 rsAccounting.incGlobalMemstoreSize(memstoreSize); -1229 } -1230 long size = this.memstoreDataSize.addAndGet(memstoreSize.getDataSize()); -1231 checkNegativeMemstoreDataSize(size, memstoreSize.getDataSize()); -1232 return size; -1233 } -1234 -1235 public void decrMemstoreSize(MemstoreSize memstoreSize) { -1236 if (this.rsAccounting != null) { -1237 rsAccounting.decGlobalMemstoreSize(memstoreSize); -1238 } -1239 long size = this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize()); -1240 checkNegativeMemstoreDataSize(size, -memstoreSize.getDataSize()); -1241 } -1242 -1243 private void checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) { -1244 // This is extremely bad if we make memstoreSize negative. Log as much info on the offending -1245 // caller as possible. (memStoreSize might be a negative value already -- freeing memory) -1246 if (memstoreDataSize < 0) { -1247 LOG.error("Asked to modify this region's (" + this.toString() -1248 + ") memstoreSize to a negative value which is incorrect. Current memstoreSize=" -1249 + (memstoreDataSize - delta) + ", delta=" + delta, new Exception()); -1250 } -1251 } -1252 -1253 @Override -1254 public HRegionInfo getRegionInfo() { -1255 return this.fs.getRegionInfo(); -1256 } -1257 -1258 /** -1259 * @return Instance of {@link RegionServerServices} used by this HRegion. -1260 * Can be null. 
-1261 */ -1262 RegionServerServices getRegionServerServices() { -1263 return this.rsServices; -1264 } -1265 -1266 @Override -1267 public long getReadRequestsCount() { -1268 return readRequestsCount.sum(); -1269 } -1270 -1271 @Override -1272 public void updateReadRequestsCount(long i) { -1273 readRequestsCount.add(i); -1274 } -1275 -1276 @Override -1277 public long getFilteredReadRequestsCount() { -1278 return filteredReadRequestsCount.sum(); -1279 } -1280 -1281 @Override -1282 public long getWriteRequestsCount() { -1283 return writeRequestsCount.sum(); -1284 } -1285 -1286 @Override -1287 public void updateWriteRequestsCount(long i) { -1288 writeRequestsCount.add(i); -1289 } -1290 -1291 @Override -1292 public long getMemstoreSize() { -1293 return memstoreDataSize.get(); -1294 } -1295 -1296 @Override -1297 public RegionServicesForStores getRegionServicesForStores() { -1298 return regionServicesForStores; -1299 } -1300 -1301 @Override -1302 public long getNumMutationsWithoutWAL() { -1303 return numMutationsWithoutWAL.sum(); -1304 } -1305 -1306 @Override -1307 public long getDataInMemoryWithoutWAL() { -1308 return dataInMemoryWithoutWAL.sum(); -1309 } -1310 -1311 @Override -1312 public long getBlockedRequestsCount() { -1313 return blockedRequestsCount.sum(); -1314 } -1315 -1316 @Override -1317 public long getCheckAndMutateChecksPassed() { -1318 return checkAndMutateChecksPassed.sum(); -1319 } -1320 -1321 @Override -1322 public long getCheckAndMutateChecksFailed() { -1323 return checkAndMutateChecksFailed.sum(); -1324 } -1325 -1326 @Override -1327 public MetricsRegion getMetrics() { -1328 return metricsRegion; -1329 } -1330 -1331 @Override -1332 public boolean isClosed() { -1333 return this.closed.get(); -1334 } -1335 -1336 @Override -1337 public boolean isClosing() { -1338 return this.closing.get(); -1339 } -1340 -1341 @Override -1342 public boolean isReadOnly() { -1343 return this.writestate.isReadOnly(); -1344 } -1345 -1346 /** -1347 * Reset recovering state of current region -1348 */ -1349 public void setRecovering(boolean newState) { -1350 boolean wasRecovering = this.recovering; -1351 // Before we flip the recovering switch (enabling reads) we should write the region open -1352 // event to WAL if needed -1353 if (wal != null && getRegionServerServices() != null && !writestate.readOnly -1354 && wasRecovering && !newState) { -1355 -1356 // force a flush only if region replication is set up for this region. Otherwise no need. -1357 boolean forceFlush = getTableDescriptor().getRegionReplication() > 1; -1358 -1359 MonitoredTask status = TaskMonitor.get().createStatus("Recovering region " + this); -1360 -1361 try { -1362 // force a flush first -1363 if (forceFlush) { -1364 status.setStatus("Flushing region " + this + " because recovery is finished"); -1365 internalFlushcache(status); -1366 } -1367 -1368 status.setStatus("Writing region open event marker to WAL because recovery is finished"); -1369 try { -1370 long seqId = openSeqNum; -1371 // obtain a new seqId because we possibly have writes and flushes on top of openSeqNum -1372 if (wal != null) { -1373 seqId = getNextSequenceId(wal); -1374 } -1375 writeRegionOpenMarker(wal, seqId); -1376 } catch (IOException e) { -1377 // We cannot rethrow this exception since we are being called from the zk thread. The -1378 // region has already opened. 
In this case we log the error, but continue -1379 LOG.warn(getRegionInfo().getEncodedName() + " : was not able to write region opening " -1380 + "event to WAL, continuing", e); -1381 } -1382 } catch (IOException ioe) { -1383 // Distributed log replay semantics does not necessarily require a flush, since the replayed -1384 // data is already written again in the WAL. So failed flush should be fine. -1385 LOG.warn(getRegionInfo().getEncodedName() + " : was not able to flush " -1386 + "event to WAL, continuing", ioe); -1387 } finally { -1388 status.cleanup(); -1389 } -1390 } -1391 -1392 this.recovering = newState; -1393 if (wasRecovering && !recovering) { -1394 // Call only when wal replay is over. -1395 coprocessorHost.postLogReplay(); -1396 } -1397 } -1398 -1399 @Override -1400 public boolean isRecovering() { -1401 return this.recovering; -1402 } -1403 -1404 @Override -1405 public boolean isAvailable() { -1406 return !isClosed() && !isClosing(); -1407 } -1408 -1409 @Override -1410 public boolean isSplittable() { -1411 boolean result = isAvailable() && !hasReferences(); -1412 LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(), -1413 new Throwable("LOGGING: REMOVE")); -1414 // REMOVE BELOW!!!! -1415 LOG.info("DEBUG LIST ALL FILES"); -1416 for (Store store: this.stores.values()) { -1417 LOG.info("store " + store.getColumnFamilyName()); -1418 for (StoreFile sf: store.getStorefiles()) { -1419 LOG.info(sf.toStringDetailed()); -1420 } -1421 } -1422 return result; -1423 } -1424 -1425 @Override -1426 public boolean isMergeable() { -1427 if (!isAvailable()) { -1428 LOG.debug("Region " + this -1429 + " is not mergeable because it is closing or closed"); -1430 return false; -1431 } -1432 if (hasReferences()) { -1433 LOG.debug("Region " + this -1434 + " is not mergeable because it has references"); -1435 return false; -1436 } -1437 -1438 return true; -1439 } -1440 -1441 public boolean areWritesEnabled() { -1442 synchronized(this.writestate) { -1443 return this.writestate.writesEnabled; -1444 } -1445 } -1446 -1447 @VisibleForTesting -1448 public MultiVersionConcurrencyControl getMVCC() { -1449 return mvcc; -1450 } -1451 -1452 @Override -1453 public long getMaxFlushedSeqId() { -1454 return maxFlushedSeqId; -1455 } -1456 -1457 @Override -1458 public long getReadPoint(IsolationLevel isolationLevel) { -1459 if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) { -1460 // This scan can read even uncommitted transactions -1461 return Long.MAX_VALUE; -1462 } -1463 return mvcc.getReadPoint(); -1464 } -1465 -1466 @Override -1467 public long getReadpoint(IsolationLevel isolationLevel) { -1468 return getReadPoint(isolationLevel); -1469 } -1470 -1471 @Override -1472 public boolean isLoadingCfsOnDemandDefault() { -1473 return this.isLoadingCfsOnDemandDefault; -1474 } -1475 -1476 /** -1477 * Close down this HRegion. Flush the cache, shut down each HStore, don't -1478 * service any more calls. -1479 * -1480 * <p>This method could take some time to execute, so don't call it from a -1481 * time-sensitive thread. -1482 * -1483 * @return Vector of all the storage files that the HRegion's component -1484 * HStores make use of. It's a list of all StoreFile objects. Returns empty -1485 * vector if already closed and null if judged that it should not close. -1486 * -1487 * @throws IOException e -1488 * @throws DroppedSnapshotException Thrown when replay of wal is required -1489 * because a Snapshot was not properly persisted. 
The region is put in closing mode, and the -1490 * caller MUST abort after this. -1491 */ -1492 public Map<byte[], List<StoreFile>> close() throws IOException { -1493 return close(false); -1494 } -1495 -1496 private final Object closeLock = new Object(); -1497 -1498 /** Conf key for the periodic flush interval */ -1499 public static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL = -1500 "hbase.regionserver.optionalcacheflushinterval"; -1501 /** Default interval for the memstore flush */ -1502 public static final int DEFAULT_CACHE_FLUSH_INTERVAL = 3600000; -1503 /** Default interval for System tables memstore flush */ -1504 public static final int SYSTEM_CACHE_FLUSH_INTERVAL = 300000; // 5 minutes -1505 -1506 /** Conf key to force a flush if there are already enough changes for one region in memstore */ -1507 public static final String MEMSTORE_FLUSH_PER_CHANGES = -1508 "hbase.regionserver.flush.per.changes"; -1509 public static final long DEFAULT_FLUSH_PER_CHANGES = 30000000; // 30 millions -1510 /** -1511 * The following MAX_FLUSH_PER_CHANGES is large enough because each KeyValue has 20+ bytes -1512 * overhead. Therefore, even 1G empty KVs occupy at least 20GB memstore size for a single region -1513 */ -1514 public static final long MAX_FLUSH_PER_CHANGES = 1000000000; // 1G -1515 -1516 /** -1517 * Close down this HRegion. Flush the cache unless abort parameter is true, -1518 * Shut down each HStore, don't service any more calls. -1519 * -1520 * This method could take some time to execute, so don't call it from a -1521 * time-sensitive thread. -1522 * -1523 * @param abort true if server is aborting (only during testing) -1524 * @return Vector of all the storage files that the HRegion's component -1525 * HStores make use of. It's a list of StoreFile objects. Can be null if -1526 * we are not to close at this time or we are already closed. -1527 * -1528 * @throws IOException e -1529 * @throws DroppedSnapshotException Thrown when replay of wal is required -1530 * because a Snapshot was not properly persisted. The region is put in closing mode, and the -1531 * caller MUST abort after this. -1532 */ -1533 public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException { -1534 // Only allow one thread to close at a time. Serialize them so dual -1535 // threads attempting to close will run up against each other. -1536 MonitoredTask status = TaskMonitor.get().createStatus( -1537 "Closing region " + this + -1538 (abort ? " due to abort" : "")); -1539 -1540 status.setStatus("Waiting for close lock"); -1541 try { -1542 synchronized (closeLock) { -1543 return doClose(abort, status); -1544 } -1545 } finally { -1546 status.cleanup(); -1547 } -1548 } -1549 -1550 /** -1551 * Exposed for some very specific unit tests. -1552 */ -1553 @VisibleForTesting -1554 public void setClosing(boolean closing) { -1555 this.closing.set(closing); -1556 } -1557 -1558 /** -1559 * The {@link HRegion#doClose} will block forever if someone tries proving the dead lock via the unit test. -1560 * Instead of blocking, the {@link HRegion#doClose} will throw exception if you set the timeout. 
-1561 * @param timeoutForWriteLock the second time to wait for the write lock in {@link HRegion#doClose} -1562 */ -1563 @VisibleForTesting -1564 public void setTimeoutForWriteLock(long timeoutForWriteLock) { -1565 assert timeoutForWriteLock >= 0; -1566 this.timeoutForWriteLock = timeoutForWriteLock; -1567 } -1568 -1569 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK_EXCEPTION_PATH", -1570 justification="I think FindBugs is confused") -1571 private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status) -1572 throws IOException { -1573 if (isClosed()) { -1574 LOG.warn("Region " + this + " already closed"); -1575 return null; -1576 } -1577 -1578 if (coprocessorHost != null) { -1579 status.setStatus("Running coprocessor pre-close hooks"); -1580 this.coprocessorHost.preClose(abort); -1581 } -1582 status.setStatus("Disabling compacts and flushes for region"); -1583 boolean canFlush = true; -1584 synchronized (writestate) { -1585 // Disable compacting and flushing by background threads for this -1586 // region. -1587 canFlush = !writestate.readOnly; -1588 writestate.writesEnabled = false; -1589 LOG.debug("Closing " + this + ": disabling compactions & flushes"); -1590 waitForFlushesAndCompactions(); -1591 } -1592 // If we were not just flushing, is it worth doing a preflush...one -1593 // that will clear out of the bulk of the memstore before we put up -1594 // the close flag? -1595 if (!abort && worthPreFlushing() && canFlush) { -1596 status.setStatus("Pre-flushing region before close"); -1597 LOG.info("Running close preflush of " + this); -1598 try { -1599 internalFlushcache(status); -1600 } catch (IOException ioe) { -1601 // Failed to flush the region. Keep going. -1602 status.setStatus("Failed pre-flush " + this + "; " + ioe.getMessage()); -1603 } -1604 } -1605 -1606 if (timeoutForWriteLock == null -1607 || timeoutForWriteLock == Long.MAX_VALUE) { -1608 // block waiting for the lock for closing -1609 lock.writeLock().lock(); // FindBugs: Complains UL_UNRELEASED_LOCK_EXCEPTION_PATH but seems fine -1
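// A JDK-only sketch of the close-time locking choice described above for doClose(): with no
// timeout configured the close path simply blocks on the region write lock, while a timeout set
// via setTimeoutForWriteLock() turns the wait into a bounded tryLock so a test cannot hang
// forever. The class, method names, and the exception thrown here are illustrative stand-ins.
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class CloseLockSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void acquireForClose(Long timeoutSeconds) throws InterruptedException {
    if (timeoutSeconds == null || timeoutSeconds == Long.MAX_VALUE) {
      lock.writeLock().lock(); // block until the region can be quiesced, as in the unbounded path
    } else if (!lock.writeLock().tryLock(timeoutSeconds, TimeUnit.SECONDS)) {
      // stand-in for whatever exception the real doClose() surfaces on timeout
      throw new IllegalStateException("could not acquire close lock in " + timeoutSeconds + "s");
    }
  }

  void releaseAfterClose() {
    lock.writeLock().unlock();
  }
}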