From: busbey@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Date: Wed, 27 Apr 2016 17:06:15 -0000
Message-Id: <95ebbfa9eff647048f2b4078c948d8c7@git.apache.org>
In-Reply-To: <5939d0173bb849d18ee19fbbce9d3dc6@git.apache.org>
References: <5939d0173bb849d18ee19fbbce9d3dc6@git.apache.org>
Subject: [07/51] [partial] hbase-site git commit: Published site at ce318a2906817058ae7b2fce6e9b54d9d6230f9b.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4131cace/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
index 0941f08..4bec2fc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.html
@@ -120,223 +120,230 @@
112 throws IOException {
113 long mcTime = getNextMajorCompactTime(filesToCompact);
114 if (filesToCompact == null || mcTime == 0) {
-115 return false;
-116 }
-117 
-118 // TODO: Use better method for determining stamp of last major (HBASE-2990)
-119 long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
-120 long now = EnvironmentEdgeManager.currentTime();
-121 if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) {
-122 return false;
-123 }
-124 
-125 long cfTTL = this.storeConfigInfo.getStoreFileTtl();
-126 HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
-127 List<Long> boundaries = getCompactBoundariesForMajor(filesToCompact, now);
-128 boolean[] filesInWindow = new boolean[boundaries.size()];
-129 
-130 for (StoreFile file: filesToCompact) {
-131 Long minTimestamp = file.getMinimumTimestamp();
-132 long oldest = (minTimestamp == null) ? Long.MIN_VALUE : now - minTimestamp.longValue();
-133 if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) {
-134 LOG.debug("Major compaction triggered on store " + this
-135 + "; for TTL maintenance");
-136 return true;
-137 }
-138 if (!file.isMajorCompaction() || file.isBulkLoadResult()) {
-139 LOG.debug("Major compaction triggered on store " + this
-140 + ", because there are new files and time since last major compaction "
-141 + (now - lowTimestamp) + "ms");
-142 return true;
-143 }
-144 
-145 int lowerWindowIndex = Collections.binarySearch(boundaries,
-146 minTimestamp == null ? (Long)Long.MAX_VALUE : minTimestamp);
-147 int upperWindowIndex = Collections.binarySearch(boundaries,
-148 file.getMaximumTimestamp() == null ? (Long)Long.MAX_VALUE : file.getMaximumTimestamp());
-149 if (lowerWindowIndex != upperWindowIndex) {
-150 LOG.debug("Major compaction triggered on store " + this + "; because file "
-151 + file.getPath() + " has data with timestamps cross window boundaries");
-152 return true;
-153 } else if (filesInWindow[upperWindowIndex]) {
-154 LOG.debug("Major compaction triggered on store " + this +
-155 "; because there are more than one file in some windows");
-156 return true;
-157 } else {
-158 filesInWindow[upperWindowIndex] = true;
-159 }
-160 hdfsBlocksDistribution.add(file.getHDFSBlockDistribution());
-161 }
-162 
-163 float blockLocalityIndex = hdfsBlocksDistribution
-164 .getBlockLocalityIndex(RSRpcServices.getHostname(comConf.conf, false));
-165 if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
-166 LOG.debug("Major compaction triggered on store " + this
-167 + "; to make hdfs blocks local, current blockLocalityIndex is "
-168 + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")");
-169 return true;
-170 }
-171 
-172 LOG.debug("Skipping major compaction of " + this +
-173 ", because the files are already major compacted");
-174 return false;
-175 }
-176 
-177 @Override
-178 protected CompactionRequest createCompactionRequest(ArrayList<StoreFile> candidateSelection,
-179 boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
-180 CompactionRequest result = tryingMajor ? selectMajorCompaction(candidateSelection)
-181 : selectMinorCompaction(candidateSelection, mayUseOffPeak, mayBeStuck);
-182 if (LOG.isDebugEnabled()) {
-183 LOG.debug("Generated compaction request: " + result);
-184 }
-185 return result;
-186 }
-187 
-188 public CompactionRequest selectMajorCompaction(ArrayList<StoreFile> candidateSelection) {
-189 long now = EnvironmentEdgeManager.currentTime();
-190 return new DateTieredCompactionRequest(candidateSelection,
-191 this.getCompactBoundariesForMajor(candidateSelection, now));
-192 }
-193 
-194 /**
-195 * We receive store files sorted in ascending order by seqId then scan the list of files. If the
-196 * current file has a maxTimestamp older than last known maximum, treat this file as it carries
-197 * the last known maximum. This way both seqId and timestamp are in the same order. If files carry
-198 * the same maxTimestamps, they are ordered by seqId. We then reverse the list so they are ordered
-199 * by seqId and maxTimestamp in descending order and build the time windows. All the out-of-order
-200 * data into the same compaction windows, guaranteeing contiguous compaction based on sequence id.
-201 */
-202 public CompactionRequest selectMinorCompaction(ArrayList<StoreFile> candidateSelection,
-203 boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
-204 long now = EnvironmentEdgeManager.currentTime();
-205 long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now);
-206 
-207 List<Pair<StoreFile, Long>> storefileMaxTimestampPairs =
-208 Lists.newArrayListWithCapacity(candidateSelection.size());
-209 long maxTimestampSeen = Long.MIN_VALUE;
-210 for (StoreFile storeFile : candidateSelection) {
-211 // if there is out-of-order data,
-212 // we put them in the same window as the last file in increasing order
-213 maxTimestampSeen = Math.max(maxTimestampSeen,
-214 storeFile.getMaximumTimestamp() == null? Long.MIN_VALUE : storeFile.getMaximumTimestamp());
-215 storefileMaxTimestampPairs.add(new Pair<StoreFile, Long>(storeFile, maxTimestampSeen));
-216 }
-217 Collections.reverse(storefileMaxTimestampPairs);
-218 
-219 CompactionWindow window = getIncomingWindow(now);
-220 int minThreshold = comConf.getDateTieredIncomingWindowMin();
-221 PeekingIterator<Pair<StoreFile, Long>> it =
-222 Iterators.peekingIterator(storefileMaxTimestampPairs.iterator());
-223 while (it.hasNext()) {
-224 if (window.compareToTimestamp(oldestToCompact) < 0) {
-225 break;
-226 }
-227 int compResult = window.compareToTimestamp(it.peek().getSecond());
-228 if (compResult > 0) {
-229 // If the file is too old for the window, switch to the next window
-230 window = window.nextEarlierWindow();
-231 minThreshold = comConf.getMinFilesToCompact();
-232 } else {
-233 // The file is within the target window
-234 ArrayList<StoreFile> fileList = Lists.newArrayList();
-235 // Add all files in the same window. For incoming window
-236 // we tolerate files with future data although it is sub-optimal
-237 while (it.hasNext() && window.compareToTimestamp(it.peek().getSecond()) <= 0) {
-238 fileList.add(it.next().getFirst());
-239 }
-240 if (fileList.size() >= minThreshold) {
-241 if (LOG.isDebugEnabled()) {
-242 LOG.debug("Processing files: " + fileList + " for window: " + window);
-243 }
-244 DateTieredCompactionRequest request = generateCompactionRequest(fileList, window,
-245 mayUseOffPeak, mayBeStuck, minThreshold);
-246 if (request != null) {
-247 return request;
-248 }
-249 }
-250 }
-251 }
-252 // A non-null file list is expected by HStore
-253 return new CompactionRequest(Collections.<StoreFile> emptyList());
-254 }
-255 
-256 private DateTieredCompactionRequest generateCompactionRequest(ArrayList<StoreFile> storeFiles,
-257 CompactionWindow window, boolean mayUseOffPeak, boolean mayBeStuck, int minThreshold)
-258 throws IOException {
-259 // The files has to be in ascending order for ratio-based compaction to work right
-260 // and removeExcessFile to exclude youngest files.
-261 Collections.reverse(storeFiles);
+115 if (LOG.isDebugEnabled()) {
+116 LOG.debug("filesToCompact: " + filesToCompact + " mcTime: " + mcTime);
+117 }
+118 return false;
+119 }
+120 
+121 // TODO: Use better method for determining stamp of last major (HBASE-2990)
+122 long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
+123 long now = EnvironmentEdgeManager.currentTime();
+124 if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) {
+125 if (LOG.isDebugEnabled()) {
+126 LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " +
+127 now + " mcTime: " + mcTime);
+128 }
+129 return false;
+130 }
+131 
+132 long cfTTL = this.storeConfigInfo.getStoreFileTtl();
+133 HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
+134 List<Long> boundaries = getCompactBoundariesForMajor(filesToCompact, now);
+135 boolean[] filesInWindow = new boolean[boundaries.size()];
+136 
+137 for (StoreFile file: filesToCompact) {
+138 Long minTimestamp = file.getMinimumTimestamp();
+139 long oldest = (minTimestamp == null) ? Long.MIN_VALUE : now - minTimestamp.longValue();
+140 if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) {
+141 LOG.debug("Major compaction triggered on store " + this
+142 + "; for TTL maintenance");
+143 return true;
+144 }
+145 if (!file.isMajorCompaction() || file.isBulkLoadResult()) {
+146 LOG.debug("Major compaction triggered on store " + this
+147 + ", because there are new files and time since last major compaction "
+148 + (now - lowTimestamp) + "ms");
+149 return true;
+150 }
+151 
+152 int lowerWindowIndex = Collections.binarySearch(boundaries,
+153 minTimestamp == null ? (Long)Long.MAX_VALUE : minTimestamp);
+154 int upperWindowIndex = Collections.binarySearch(boundaries,
+155 file.getMaximumTimestamp() == null ? (Long)Long.MAX_VALUE : file.getMaximumTimestamp());
+156 if (lowerWindowIndex != upperWindowIndex) {
+157 LOG.debug("Major compaction triggered on store " + this + "; because file "
+158 + file.getPath() + " has data with timestamps cross window boundaries");
+159 return true;
+160 } else if (filesInWindow[upperWindowIndex]) {
+161 LOG.debug("Major compaction triggered on store " + this +
+162 "; because there are more than one file in some windows");
+163 return true;
+164 } else {
+165 filesInWindow[upperWindowIndex] = true;
+166 }
+167 hdfsBlocksDistribution.add(file.getHDFSBlockDistribution());
+168 }
+169 
+170 float blockLocalityIndex = hdfsBlocksDistribution
+171 .getBlockLocalityIndex(RSRpcServices.getHostname(comConf.conf, false));
+172 if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
+173 LOG.debug("Major compaction triggered on store " + this
+174 + "; to make hdfs blocks local, current blockLocalityIndex is "
+175 + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")");
+176 return true;
+177 }
+178 
+179 LOG.debug("Skipping major compaction of " + this +
+180 ", because the files are already major compacted");
+181 return false;
+182 }
+183 
+184 @Override
+185 protected CompactionRequest createCompactionRequest(ArrayList<StoreFile> candidateSelection,
+186 boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
+187 CompactionRequest result = tryingMajor ? selectMajorCompaction(candidateSelection)
+188 : selectMinorCompaction(candidateSelection, mayUseOffPeak, mayBeStuck);
+189 if (LOG.isDebugEnabled()) {
+190 LOG.debug("Generated compaction request: " + result);
+191 }
+192 return result;
+193 }
+194 
+195 public CompactionRequest selectMajorCompaction(ArrayList<StoreFile> candidateSelection) {
+196 long now = EnvironmentEdgeManager.currentTime();
+197 return new DateTieredCompactionRequest(candidateSelection,
+198 this.getCompactBoundariesForMajor(candidateSelection, now));
+199 }
+200 
+201 /**
+202 * We receive store files sorted in ascending order by seqId then scan the list of files. If the
+203 * current file has a maxTimestamp older than last known maximum, treat this file as it carries
+204 * the last known maximum. This way both seqId and timestamp are in the same order. If files carry
+205 * the same maxTimestamps, they are ordered by seqId. We then reverse the list so they are ordered
+206 * by seqId and maxTimestamp in descending order and build the time windows. All the out-of-order
+207 * data into the same compaction windows, guaranteeing contiguous compaction based on sequence id.
+208 */
+209 public CompactionRequest selectMinorCompaction(ArrayList<StoreFile> candidateSelection,
+210 boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
+211 long now = EnvironmentEdgeManager.currentTime();
+212 long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now);
+213 
+214 List<Pair<StoreFile, Long>> storefileMaxTimestampPairs =
+215 Lists.newArrayListWithCapacity(candidateSelection.size());
+216 long maxTimestampSeen = Long.MIN_VALUE;
+217 for (StoreFile storeFile : candidateSelection) {
+218 // if there is out-of-order data,
+219 // we put them in the same window as the last file in increasing order
+220 maxTimestampSeen = Math.max(maxTimestampSeen,
+221 storeFile.getMaximumTimestamp() == null? Long.MIN_VALUE : storeFile.getMaximumTimestamp());
+222 storefileMaxTimestampPairs.add(new Pair<StoreFile, Long>(storeFile, maxTimestampSeen));
+223 }
+224 Collections.reverse(storefileMaxTimestampPairs);
+225 
+226 CompactionWindow window = getIncomingWindow(now);
+227 int minThreshold = comConf.getDateTieredIncomingWindowMin();
+228 PeekingIterator<Pair<StoreFile, Long>> it =
+229 Iterators.peekingIterator(storefileMaxTimestampPairs.iterator());
+230 while (it.hasNext()) {
+231 if (window.compareToTimestamp(oldestToCompact) < 0) {
+232 break;
+233 }
+234 int compResult = window.compareToTimestamp(it.peek().getSecond());
+235 if (compResult > 0) {
+236 // If the file is too old for the window, switch to the next window
+237 window = window.nextEarlierWindow();
+238 minThreshold = comConf.getMinFilesToCompact();
+239 } else {
+240 // The file is within the target window
+241 ArrayList<StoreFile> fileList = Lists.newArrayList();
+242 // Add all files in the same window. For incoming window
+243 // we tolerate files with future data although it is sub-optimal
+244 while (it.hasNext() && window.compareToTimestamp(it.peek().getSecond()) <= 0) {
+245 fileList.add(it.next().getFirst());
+246 }
+247 if (fileList.size() >= minThreshold) {
+248 if (LOG.isDebugEnabled()) {
+249 LOG.debug("Processing files: " + fileList + " for window: " + window);
+250 }
+251 DateTieredCompactionRequest request = generateCompactionRequest(fileList, window,
+252 mayUseOffPeak, mayBeStuck, minThreshold);
+253 if (request != null) {
+254 return request;
+255 }
+256 }
+257 }
+258 }
+259 // A non-null file list is expected by HStore
+260 return new CompactionRequest(Collections.<StoreFile> emptyList());
+261 }
262 
-263 // Compact everything in the window if have more files than comConf.maxBlockingFiles
-264 compactionPolicyPerWindow.setMinThreshold(minThreshold);
-265 ArrayList<StoreFile> storeFileSelection = mayBeStuck ? storeFiles
-266 : compactionPolicyPerWindow.applyCompactionPolicy(storeFiles, mayUseOffPeak, false);
-267 if (storeFileSelection != null && !storeFileSelection.isEmpty()) {
-268 // If there is any file in the window excluded from compaction,
-269 // only one file will be output from compaction.
-270 boolean singleOutput = storeFiles.size() != storeFileSelection.size() ||
-271 comConf.useDateTieredSingleOutputForMinorCompaction();
-272 List<Long> boundaries = getCompactionBoundariesForMinor(window, singleOutput);
-273 DateTieredCompactionRequest result = new DateTieredCompactionRequest(storeFileSelection,
-274 boundaries);
-275 return result;
-276 }
-277 return null;
-278 }
-279 
-280 /**
-281 * Return a list of boundaries for multiple compaction output
-282 * in ascending order.
-283 */
-284 private List<Long> getCompactBoundariesForMajor(Collection<StoreFile> filesToCompact, long now) {
-285 long minTimestamp = Long.MAX_VALUE;
-286 for (StoreFile file : filesToCompact) {
-287 minTimestamp =
-288 Math.min(minTimestamp,
-289 file.getMinimumTimestamp() == null ? Long.MAX_VALUE : file.getMinimumTimestamp());
-290 }
-291 
-292 List<Long> boundaries = new ArrayList<Long>();
-293 
-294 // Add startMillis of all windows between now and min timestamp
-295 for (CompactionWindow window = getIncomingWindow(now);
-296 window.compareToTimestamp(minTimestamp) > 0;
-297 window = window.nextEarlierWindow()) {
-298 boundaries.add(window.startMillis());
-299 }
-300 boundaries.add(Long.MIN_VALUE);
-301 Collections.reverse(boundaries);
-302 return boundaries;
-303 }
-304 
-305 /**
-306 * @return a list of boundaries for multiple compaction output from minTimestamp to maxTimestamp.
-307 */
-308 private static List<Long> getCompactionBoundariesForMinor(CompactionWindow window,
-309 boolean singleOutput) {
-310 List<Long> boundaries = new ArrayList<Long>();
-311 boundaries.add(Long.MIN_VALUE);
-312 if (!singleOutput) {
-313 boundaries.add(window.startMillis());
-314 }
-315 return boundaries;
-316 }
-317 
-318 private CompactionWindow getIncomingWindow(long now) {
-319 return windowFactory.newIncomingWindow(now);
-320 }
-321 
-322 private static long getOldestToCompact(long maxAgeMillis, long now) {
-323 try {
-324 return LongMath.checkedSubtract(now, maxAgeMillis);
-325 } catch (ArithmeticException ae) {
-326 LOG.warn("Value for " + CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY + ": "
-327 + maxAgeMillis + ". All the files will be eligible for minor compaction.");
-328 return Long.MIN_VALUE;
-329 }
-330 }
-331}
+263 private DateTieredCompactionRequest generateCompactionRequest(ArrayList<StoreFile> storeFiles,
+264 CompactionWindow window, boolean mayUseOffPeak, boolean mayBeStuck, int minThreshold)
+265 throws IOException {
+266 // The files has to be in ascending order for ratio-based compaction to work right
+267 // and removeExcessFile to exclude youngest files.
+268 Collections.reverse(storeFiles);
+269 
+270 // Compact everything in the window if have more files than comConf.maxBlockingFiles
+271 compactionPolicyPerWindow.setMinThreshold(minThreshold);
+272 ArrayList<StoreFile> storeFileSelection = mayBeStuck ? storeFiles
+273 : compactionPolicyPerWindow.applyCompactionPolicy(storeFiles, mayUseOffPeak, false);
+274 if (storeFileSelection != null && !storeFileSelection.isEmpty()) {
+275 // If there is any file in the window excluded from compaction,
+276 // only one file will be output from compaction.
+277 boolean singleOutput = storeFiles.size() != storeFileSelection.size() ||
+278 comConf.useDateTieredSingleOutputForMinorCompaction();
+279 List<Long> boundaries = getCompactionBoundariesForMinor(window, singleOutput);
+280 DateTieredCompactionRequest result = new DateTieredCompactionRequest(storeFileSelection,
+281 boundaries);
+282 return result;
+283 }
+284 return null;
+285 }
+286 
+287 /**
+288 * Return a list of boundaries for multiple compaction output
+289 * in ascending order.
+290 */
+291 private List<Long> getCompactBoundariesForMajor(Collection<StoreFile> filesToCompact, long now) {
+292 long minTimestamp = Long.MAX_VALUE;
+293 for (StoreFile file : filesToCompact) {
+294 minTimestamp =
+295 Math.min(minTimestamp,
+296 file.getMinimumTimestamp() == null ? Long.MAX_VALUE : file.getMinimumTimestamp());
+297 }
+298 
+299 List<Long> boundaries = new ArrayList<Long>();
+300 
+301 // Add startMillis of all windows between now and min timestamp
+302 for (CompactionWindow window = getIncomingWindow(now);
+303 window.compareToTimestamp(minTimestamp) > 0;
+304 window = window.nextEarlierWindow()) {
+305 boundaries.add(window.startMillis());
+306 }
+307 boundaries.add(Long.MIN_VALUE);
+308 Collections.reverse(boundaries);
+309 return boundaries;
+310 }
+311 
+312 /**
+313 * @return a list of boundaries for multiple compaction output from minTimestamp to maxTimestamp.
+314 */
+315 private static List<Long> getCompactionBoundariesForMinor(CompactionWindow window,
+316 boolean singleOutput) {
+317 List<Long> boundaries = new ArrayList<Long>();
+318 boundaries.add(Long.MIN_VALUE);
+319 if (!singleOutput) {
+320 boundaries.add(window.startMillis());
+321 }
+322 return boundaries;
+323 }
+324 
+325 private CompactionWindow getIncomingWindow(long now) {
+326 return windowFactory.newIncomingWindow(now);
+327 }
+328 
+329 private static long getOldestToCompact(long maxAgeMillis, long now) {
+330 try {
+331 return LongMath.checkedSubtract(now, maxAgeMillis);
+332 } catch (ArithmeticException ae) {
+333 LOG.warn("Value for " + CompactionConfiguration.DATE_TIERED_MAX_AGE_MILLIS_KEY + ": "
+334 + maxAgeMillis + ". All the files will be eligible for minor compaction.");
+335 return Long.MIN_VALUE;
+336 }
+337 }
+338}
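A short illustration for readers of the diff above; this is not HBase code. The class name, the fixed window width, and the sample timestamps are invented for the example, and the real policy gets its windows from a CompactionWindowFactory and its boundary list from getCompactBoundariesForMajor. The sketch shows how an ascending boundary list plus Collections.binarySearch decides whether a file's minimum and maximum timestamps land in the same window, which is the check shouldPerformMajorCompaction uses to trigger a major compaction for files that straddle a window boundary.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Toy model of the boundary logic rendered in the diff above (not HBase code).
    public class WindowBoundarySketch {

      // Assumed fixed window width; the real policy uses tiered windows.
      static final long WINDOW_MILLIS = 10_000L;

      // Mirrors getCompactBoundariesForMajor: ascending window start times from the
      // oldest needed window up to "now", with Long.MIN_VALUE as the first entry.
      static List<Long> boundaries(long minTimestamp, long now) {
        List<Long> bounds = new ArrayList<>();
        for (long start = now - (now % WINDOW_MILLIS); start > minTimestamp; start -= WINDOW_MILLIS) {
          bounds.add(start);
        }
        bounds.add(Long.MIN_VALUE);
        Collections.reverse(bounds);
        return bounds;
      }

      // Mirrors the binarySearch check in shouldPerformMajorCompaction: if min and max
      // timestamps resolve to different positions, the file crosses a window boundary.
      static boolean crossesWindowBoundary(List<Long> bounds, long fileMinTs, long fileMaxTs) {
        int lowerWindowIndex = Collections.binarySearch(bounds, fileMinTs);
        int upperWindowIndex = Collections.binarySearch(bounds, fileMaxTs);
        return lowerWindowIndex != upperWindowIndex;
      }

      public static void main(String[] args) {
        List<Long> bounds = boundaries(55_000L, 100_000L);
        // Prints [Long.MIN_VALUE, 60000, 70000, 80000, 90000, 100000]
        System.out.println(bounds);
        System.out.println(crossesWindowBoundary(bounds, 61_000L, 69_000L)); // false: same window
        System.out.println(crossesWindowBoundary(bounds, 61_000L, 75_000L)); // true: spans a boundary
      }
    }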

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4131cace/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.Iter.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.Iter.html b/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.Iter.html
index 97dce02..9064565 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.Iter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.Iter.html
@@ -856,7 +856,27 @@
848 CompareOp compareOp, byte[] value, RowMutations rm) throws IOException {
849 throw new UnsupportedOperationException("checkAndMutate not implemented");
850 }
-851}
+851 
+852 @Override
+853 public void setOperationTimeout(int operationTimeout) {
+854 throw new UnsupportedOperationException();
+855 }
+856 
+857 @Override
+858 public int getOperationTimeout() {
+859 throw new UnsupportedOperationException();
+860 }
+861 
+862 @Override
+863 public void setRpcTimeout(int rpcTimeout) {
+864 throw new UnsupportedOperationException();
+865 }
+866 
+867 @Override
+868 public int getRpcTimeout() {
+869 throw new UnsupportedOperationException();
+870 }
+871}
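A side note on getOldestToCompact from the DateTieredCompactionPolicy diff earlier in this message: the only purpose of its try/catch is that the subtraction can overflow for extreme configured ages, in which case every file stays eligible for minor compaction. The standalone method below is a rewrite for illustration, not the HBase implementation; it uses the JDK's Math.subtractExact instead of Guava's LongMath.checkedSubtract, and both throw ArithmeticException on overflow.

    // Sketch of the overflow guard (assumes Java 8+ for Math.subtractExact).
    static long oldestToCompact(long maxAgeMillis, long now) {
      try {
        return Math.subtractExact(now, maxAgeMillis);
      } catch (ArithmeticException overflow) {
        // A pathological value such as Long.MIN_VALUE makes now - maxAgeMillis overflow;
        // fall back to "no age cut-off", so every file remains eligible for minor compaction.
        return Long.MIN_VALUE;
      }
    }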

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4131cace/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.html
index 97dce02..9064565 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.Scanner.html
@@ -856,7 +856,27 @@
848 CompareOp compareOp, byte[] value, RowMutations rm) throws IOException {
849 throw new UnsupportedOperationException("checkAndMutate not implemented");
850 }
-851}
+851 
+852 @Override
+853 public void setOperationTimeout(int operationTimeout) {
+854 throw new UnsupportedOperationException();
+855 }
+856 
+857 @Override
+858 public int getOperationTimeout() {
+859 throw new UnsupportedOperationException();
+860 }
+861 
+862 @Override
+863 public void setRpcTimeout(int rpcTimeout) {
+864 throw new UnsupportedOperationException();
+865 }
+866 
+867 @Override
+868 public int getRpcTimeout() {
+869 throw new UnsupportedOperationException();
+870 }
+871}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4131cace/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html b/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
index 97dce02..9064565 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
@@ -856,7 +856,27 @@
848 CompareOp compareOp, byte[] value, RowMutations rm) throws IOException {
849 throw new UnsupportedOperationException("checkAndMutate not implemented");
850 }
-851}
+851 
+852 @Override
+853 public void setOperationTimeout(int operationTimeout) {
+854 throw new UnsupportedOperationException();
+855 }
+856 
+857 @Override
+858 public int getOperationTimeout() {
+859 throw new UnsupportedOperationException();
+860 }
+861 
+862 @Override
+863 public void setRpcTimeout(int rpcTimeout) {
+864 throw new UnsupportedOperationException();
+865 }
+866 
+867 @Override
+868 public int getRpcTimeout() {
+869 throw new UnsupportedOperationException();
+870 }
+871}
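The three diffs above are the same change rendered on three generated pages: RemoteHTable now overrides the Table interface's timeout accessors as stubs that always throw UnsupportedOperationException, since the REST gateway has no per-operation or per-RPC timeout to apply. A small caller-side guard can make that explicit; the helper class and method name below are hypothetical, invented for illustration, and only the Table methods shown in the diffs are assumed.

    import org.apache.hadoop.hbase.client.Table;

    // Hypothetical caller-side guard (not part of HBase).
    public final class TimeoutSupport {
      private TimeoutSupport() {
      }

      /** @return true if the timeout was applied, false if the Table implementation rejects it. */
      public static boolean trySetOperationTimeout(Table table, int timeoutMs) {
        try {
          table.setOperationTimeout(timeoutMs);
          return true;
        } catch (UnsupportedOperationException e) {
          // e.g. org.apache.hadoop.hbase.rest.client.RemoteHTable, as shown in the diffs above
          return false;
        }
      }
    }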