From: git-site-role@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Date: Tue, 27 Mar 2018 14:48:42 -0000
Message-Id: <596d44d8f7b04b4f97c9f958307d102a@git.apache.org>
In-Reply-To: <060b86d9c90248839ae2f26df8768f36@git.apache.org>
References: <060b86d9c90248839ae2f26df8768f36@git.apache.org>
Subject: [18/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index d7aa8b1..98a45a0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -680,1330 +680,1333 @@
672 }
673 List<HRegionLocation> locations = new ArrayList<>();
674 for (RegionInfo regionInfo : regions) {
-675 RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-676 if (list != null) {
-677 for (HRegionLocation loc : list.getRegionLocations()) {
-678 if (loc != null) {
-679 locations.add(loc);
-680 }
-681 }
-682 }
-683 }
-684 return locations;
-685 }
-686
-687 @Override
-688 public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
-689 throws IOException {
-690 RegionLocations locations = locateRegion(tableName, row, true, true);
-691 return locations == null ? null : locations.getRegionLocation();
-692 }
-693
-694 @Override
-695 public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
-696 throws IOException {
-697 RegionLocations locations =
-698 relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-699 return locations == null ?
null -700 : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); -701 } -702 -703 @Override -704 public RegionLocations relocateRegion(final TableName tableName, -705 final byte [] row, int replicaId) throws IOException{ -706 // Since this is an explicit request not to use any caching, finding -707 // disabled tables should not be desirable. This will ensure that an exception is thrown when -708 // the first time a disabled table is interacted with. -709 if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) { -710 throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled."); -711 } -712 -713 return locateRegion(tableName, row, false, true, replicaId); -714 } +675 if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) { +676 continue; +677 } +678 RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true); +679 if (list != null) { +680 for (HRegionLocation loc : list.getRegionLocations()) { +681 if (loc != null) { +682 locations.add(loc); +683 } +684 } +685 } +686 } +687 return locations; +688 } +689 +690 @Override +691 public HRegionLocation locateRegion(final TableName tableName, final byte[] row) +692 throws IOException { +693 RegionLocations locations = locateRegion(tableName, row, true, true); +694 return locations == null ? null : locations.getRegionLocation(); +695 } +696 +697 @Override +698 public HRegionLocation relocateRegion(final TableName tableName, final byte[] row) +699 throws IOException { +700 RegionLocations locations = +701 relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); +702 return locations == null ? null +703 : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); +704 } +705 +706 @Override +707 public RegionLocations relocateRegion(final TableName tableName, +708 final byte [] row, int replicaId) throws IOException{ +709 // Since this is an explicit request not to use any caching, finding +710 // disabled tables should not be desirable. This will ensure that an exception is thrown when +711 // the first time a disabled table is interacted with. +712 if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) { +713 throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled."); +714 } 715 -716 @Override -717 public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, -718 boolean retry) throws IOException { -719 return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID); -720 } -721 -722 @Override -723 public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, -724 boolean retry, int replicaId) throws IOException { -725 checkClosed(); -726 if (tableName == null || tableName.getName().length == 0) { -727 throw new IllegalArgumentException("table name cannot be null or zero length"); -728 } -729 if (tableName.equals(TableName.META_TABLE_NAME)) { -730 return locateMeta(tableName, useCache, replicaId); -731 } else { -732 // Region not in the cache - have to go to the meta RS -733 return locateRegionInMeta(tableName, row, useCache, retry, replicaId); -734 } -735 } -736 -737 private RegionLocations locateMeta(final TableName tableName, -738 boolean useCache, int replicaId) throws IOException { -739 // HBASE-10785: We cache the location of the META itself, so that we are not overloading -740 // zookeeper with one request for every region lookup. We cache the META with empty row -741 // key in MetaCache. 
-742 byte[] metaCacheKey = HConstants.EMPTY_START_ROW; // use byte[0] as the row for meta -743 RegionLocations locations = null; -744 if (useCache) { -745 locations = getCachedLocation(tableName, metaCacheKey); -746 if (locations != null && locations.getRegionLocation(replicaId) != null) { -747 return locations; -748 } -749 } -750 -751 // only one thread should do the lookup. -752 synchronized (metaRegionLock) { -753 // Check the cache again for a hit in case some other thread made the -754 // same query while we were waiting on the lock. -755 if (useCache) { -756 locations = getCachedLocation(tableName, metaCacheKey); -757 if (locations != null && locations.getRegionLocation(replicaId) != null) { -758 return locations; -759 } -760 } -761 -762 // Look up from zookeeper -763 locations = get(this.registry.getMetaRegionLocation()); -764 if (locations != null) { -765 cacheLocation(tableName, locations); -766 } -767 } -768 return locations; -769 } -770 -771 /* -772 * Search the hbase:meta table for the HRegionLocation -773 * info that contains the table and row we're seeking. -774 */ -775 private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, -776 boolean useCache, boolean retry, int replicaId) throws IOException { -777 -778 // If we are supposed to be using the cache, look in the cache to see if -779 // we already have the region. -780 if (useCache) { -781 RegionLocations locations = getCachedLocation(tableName, row); -782 if (locations != null && locations.getRegionLocation(replicaId) != null) { -783 return locations; -784 } -785 } -786 -787 // build the key of the meta region we should be looking for. -788 // the extra 9's on the end are necessary to allow "exact" matches -789 // without knowing the precise region names. -790 byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); -791 byte[] metaStopKey = -792 RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); -793 -794 Scan s = new Scan(); -795 s.setReversed(true); -796 s.withStartRow(metaStartKey); -797 s.withStopRow(metaStopKey, true); -798 s.addFamily(HConstants.CATALOG_FAMILY); -799 -800 if (this.useMetaReplicas) { -801 s.setConsistency(Consistency.TIMELINE); -802 } -803 -804 int maxAttempts = (retry ? numTries : 1); -805 for (int tries = 0; true; tries++) { -806 if (tries >= maxAttempts) { -807 throw new NoServerForRegionException("Unable to find region for " -808 + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries."); -809 } -810 if (useCache) { -811 RegionLocations locations = getCachedLocation(tableName, row); -812 if (locations != null && locations.getRegionLocation(replicaId) != null) { -813 return locations; -814 } -815 } else { -816 // If we are not supposed to be using the cache, delete any existing cached location -817 // so it won't interfere. 
-818 // We are only supposed to clean the cache for the specific replicaId -819 metaCache.clearCache(tableName, row, replicaId); -820 } -821 -822 // Query the meta region -823 long pauseBase = this.pause; -824 userRegionLock.lock(); -825 try { -826 if (useCache) {// re-check cache after get lock -827 RegionLocations locations = getCachedLocation(tableName, row); -828 if (locations != null && locations.getRegionLocation(replicaId) != null) { -829 return locations; -830 } -831 } -832 Result regionInfoRow = null; -833 s.resetMvccReadPoint(); -834 s.setOneRowLimit(); -835 try (ReversedClientScanner rcs = -836 new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, -837 rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) { -838 regionInfoRow = rcs.next(); -839 } -840 -841 if (regionInfoRow == null) { -842 throw new TableNotFoundException(tableName); -843 } -844 // convert the row result into the HRegionLocation we need! -845 RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow); -846 if (locations == null || locations.getRegionLocation(replicaId) == null) { -847 throw new IOException("RegionInfo null in " + tableName + ", row=" + regionInfoRow); -848 } -849 RegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegion(); -850 if (regionInfo == null) { -851 throw new IOException("RegionInfo null or empty in " + -852 TableName.META_TABLE_NAME + ", row=" + regionInfoRow); -853 } -854 -855 // possible we got a region of a different table... -856 if (!regionInfo.getTable().equals(tableName)) { -857 throw new TableNotFoundException( -858 "Region of '" + regionInfo.getRegionNameAsString() + "' is expected in the table of '" + tableName + "', " + -859 "but hbase:meta says it is in the table of '" + regionInfo.getTable() + "'. " + -860 "hbase:meta might be damaged."); -861 } -862 if (regionInfo.isSplit()) { -863 throw new RegionOfflineException ("Region for row is a split parent, daughters not online: " + -864 regionInfo.getRegionNameAsString()); -865 } -866 if (regionInfo.isOffline()) { -867 throw new RegionOfflineException("Region offline; disable table call? " + -868 regionInfo.getRegionNameAsString()); -869 } -870 -871 ServerName serverName = locations.getRegionLocation(replicaId).getServerName(); -872 if (serverName == null) { -873 throw new NoServerForRegionException("No server address listed in " -874 + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() -875 + " containing row " + Bytes.toStringBinary(row)); -876 } -877 -878 if (isDeadServer(serverName)){ -879 throw new RegionServerStoppedException("hbase:meta says the region "+ -880 regionInfo.getRegionNameAsString()+" is managed by the server " + serverName + -881 ", but it is dead."); -882 } -883 // Instantiate the location -884 cacheLocation(tableName, locations); -885 return locations; -886 } catch (TableNotFoundException e) { -887 // if we got this error, probably means the table just plain doesn't -888 // exist. rethrow the error immediately. this should always be coming -889 // from the HTable constructor. 
-890 throw e; -891 } catch (IOException e) { -892 ExceptionUtil.rethrowIfInterrupt(e); -893 if (e instanceof RemoteException) { -894 e = ((RemoteException)e).unwrapRemoteException(); -895 } -896 if (e instanceof CallQueueTooBigException) { -897 // Give a special check on CallQueueTooBigException, see #HBASE-17114 -898 pauseBase = this.pauseForCQTBE; -899 } -900 if (tries < maxAttempts - 1) { -901 if (LOG.isDebugEnabled()) { -902 LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME -903 + ", metaLocation=" + ", attempt=" + tries + " of " + maxAttempts -904 + " failed; retrying after sleep of " -905 + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage()); -906 } -907 } else { -908 throw e; -909 } -910 // Only relocate the parent region if necessary -911 if(!(e instanceof RegionOfflineException || -912 e instanceof NoServerForRegionException)) { -913 relocateRegion(TableName.META_TABLE_NAME, metaStartKey, replicaId); -914 } -915 } finally { -916 userRegionLock.unlock(); -917 } -918 try{ -919 Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries)); -920 } catch (InterruptedException e) { -921 throw new InterruptedIOException("Giving up trying to location region in " + -922 "meta: thread is interrupted."); -923 } -924 } -925 } -926 -927 /** -928 * Put a newly discovered HRegionLocation into the cache. -929 * @param tableName The table name. -930 * @param location the new location -931 */ -932 @Override -933 public void cacheLocation(final TableName tableName, final RegionLocations location) { -934 metaCache.cacheLocation(tableName, location); -935 } -936 -937 /** -938 * Search the cache for a location that fits our table and row key. -939 * Return null if no suitable region is located. -940 * @return Null or region location found in cache. -941 */ -942 RegionLocations getCachedLocation(final TableName tableName, -943 final byte [] row) { -944 return metaCache.getCachedLocation(tableName, row); -945 } -946 -947 public void clearRegionCache(final TableName tableName, byte[] row) { -948 metaCache.clearCache(tableName, row); -949 } -950 -951 /* -952 * Delete all cached entries of a table that maps to a specific location. -953 */ -954 @Override -955 public void clearCaches(final ServerName serverName) { -956 metaCache.clearCache(serverName); -957 } -958 -959 @Override -960 public void clearRegionCache() { -961 metaCache.clearCache(); -962 } -963 -964 @Override -965 public void clearRegionCache(final TableName tableName) { -966 metaCache.clearCache(tableName); -967 } -968 -969 /** -970 * Put a newly discovered HRegionLocation into the cache. -971 * @param tableName The table name. 
-972 * @param source the source of the new location, if it's not coming from meta -973 * @param location the new location -974 */ -975 private void cacheLocation(final TableName tableName, final ServerName source, -976 final HRegionLocation location) { -977 metaCache.cacheLocation(tableName, source, location); -978 } -979 -980 // Map keyed by service name + regionserver to service stub implementation -981 private final ConcurrentMap<String, Object> stubs = new ConcurrentHashMap<>(); +716 return locateRegion(tableName, row, false, true, replicaId); +717 } +718 +719 @Override +720 public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, +721 boolean retry) throws IOException { +722 return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID); +723 } +724 +725 @Override +726 public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, +727 boolean retry, int replicaId) throws IOException { +728 checkClosed(); +729 if (tableName == null || tableName.getName().length == 0) { +730 throw new IllegalArgumentException("table name cannot be null or zero length"); +731 } +732 if (tableName.equals(TableName.META_TABLE_NAME)) { +733 return locateMeta(tableName, useCache, replicaId); +734 } else { +735 // Region not in the cache - have to go to the meta RS +736 return locateRegionInMeta(tableName, row, useCache, retry, replicaId); +737 } +738 } +739 +740 private RegionLocations locateMeta(final TableName tableName, +741 boolean useCache, int replicaId) throws IOException { +742 // HBASE-10785: We cache the location of the META itself, so that we are not overloading +743 // zookeeper with one request for every region lookup. We cache the META with empty row +744 // key in MetaCache. +745 byte[] metaCacheKey = HConstants.EMPTY_START_ROW; // use byte[0] as the row for meta +746 RegionLocations locations = null; +747 if (useCache) { +748 locations = getCachedLocation(tableName, metaCacheKey); +749 if (locations != null && locations.getRegionLocation(replicaId) != null) { +750 return locations; +751 } +752 } +753 +754 // only one thread should do the lookup. +755 synchronized (metaRegionLock) { +756 // Check the cache again for a hit in case some other thread made the +757 // same query while we were waiting on the lock. +758 if (useCache) { +759 locations = getCachedLocation(tableName, metaCacheKey); +760 if (locations != null && locations.getRegionLocation(replicaId) != null) { +761 return locations; +762 } +763 } +764 +765 // Look up from zookeeper +766 locations = get(this.registry.getMetaRegionLocation()); +767 if (locations != null) { +768 cacheLocation(tableName, locations); +769 } +770 } +771 return locations; +772 } +773 +774 /* +775 * Search the hbase:meta table for the HRegionLocation +776 * info that contains the table and row we're seeking. +777 */ +778 private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, +779 boolean useCache, boolean retry, int replicaId) throws IOException { +780 +781 // If we are supposed to be using the cache, look in the cache to see if +782 // we already have the region. +783 if (useCache) { +784 RegionLocations locations = getCachedLocation(tableName, row); +785 if (locations != null && locations.getRegionLocation(replicaId) != null) { +786 return locations; +787 } +788 } +789 +790 // build the key of the meta region we should be looking for. 
+791 // the extra 9's on the end are necessary to allow "exact" matches +792 // without knowing the precise region names. +793 byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); +794 byte[] metaStopKey = +795 RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); +796 +797 Scan s = new Scan(); +798 s.setReversed(true); +799 s.withStartRow(metaStartKey); +800 s.withStopRow(metaStopKey, true); +801 s.addFamily(HConstants.CATALOG_FAMILY); +802 +803 if (this.useMetaReplicas) { +804 s.setConsistency(Consistency.TIMELINE); +805 } +806 +807 int maxAttempts = (retry ? numTries : 1); +808 for (int tries = 0; true; tries++) { +809 if (tries >= maxAttempts) { +810 throw new NoServerForRegionException("Unable to find region for " +811 + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries."); +812 } +813 if (useCache) { +814 RegionLocations locations = getCachedLocation(tableName, row); +815 if (locations != null && locations.getRegionLocation(replicaId) != null) { +816 return locations; +817 } +818 } else { +819 // If we are not supposed to be using the cache, delete any existing cached location +820 // so it won't interfere. +821 // We are only supposed to clean the cache for the specific replicaId +822 metaCache.clearCache(tableName, row, replicaId); +823 } +824 +825 // Query the meta region +826 long pauseBase = this.pause; +827 userRegionLock.lock(); +828 try { +829 if (useCache) {// re-check cache after get lock +830 RegionLocations locations = getCachedLocation(tableName, row); +831 if (locations != null && locations.getRegionLocation(replicaId) != null) { +832 return locations; +833 } +834 } +835 Result regionInfoRow = null; +836 s.resetMvccReadPoint(); +837 s.setOneRowLimit(); +838 try (ReversedClientScanner rcs = +839 new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, +840 rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) { +841 regionInfoRow = rcs.next(); +842 } +843 +844 if (regionInfoRow == null) { +845 throw new TableNotFoundException(tableName); +846 } +847 // convert the row result into the HRegionLocation we need! +848 RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow); +849 if (locations == null || locations.getRegionLocation(replicaId) == null) { +850 throw new IOException("RegionInfo null in " + tableName + ", row=" + regionInfoRow); +851 } +852 RegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegion(); +853 if (regionInfo == null) { +854 throw new IOException("RegionInfo null or empty in " + +855 TableName.META_TABLE_NAME + ", row=" + regionInfoRow); +856 } +857 +858 // possible we got a region of a different table... +859 if (!regionInfo.getTable().equals(tableName)) { +860 throw new TableNotFoundException( +861 "Region of '" + regionInfo.getRegionNameAsString() + "' is expected in the table of '" + tableName + "', " + +862 "but hbase:meta says it is in the table of '" + regionInfo.getTable() + "'. " + +863 "hbase:meta might be damaged."); +864 } +865 if (regionInfo.isSplit()) { +866 throw new RegionOfflineException ("Region for row is a split parent, daughters not online: " + +867 regionInfo.getRegionNameAsString()); +868 } +869 if (regionInfo.isOffline()) { +870 throw new RegionOfflineException("Region offline; disable table call? 
" + +871 regionInfo.getRegionNameAsString()); +872 } +873 +874 ServerName serverName = locations.getRegionLocation(replicaId).getServerName(); +875 if (serverName == null) { +876 throw new NoServerForRegionException("No server address listed in " +877 + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() +878 + " containing row " + Bytes.toStringBinary(row)); +879 } +880 +881 if (isDeadServer(serverName)){ +882 throw new RegionServerStoppedException("hbase:meta says the region "+ +883 regionInfo.getRegionNameAsString()+" is managed by the server " + serverName + +884 ", but it is dead."); +885 } +886 // Instantiate the location +887 cacheLocation(tableName, locations); +888 return locations; +889 } catch (TableNotFoundException e) { +890 // if we got this error, probably means the table just plain doesn't +891 // exist. rethrow the error immediately. this should always be coming +892 // from the HTable constructor. +893 throw e; +894 } catch (IOException e) { +895 ExceptionUtil.rethrowIfInterrupt(e); +896 if (e instanceof RemoteException) { +897 e = ((RemoteException)e).unwrapRemoteException(); +898 } +899 if (e instanceof CallQueueTooBigException) { +900 // Give a special check on CallQueueTooBigException, see #HBASE-17114 +901 pauseBase = this.pauseForCQTBE; +902 } +903 if (tries < maxAttempts - 1) { +904 if (LOG.isDebugEnabled()) { +905 LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME +906 + ", metaLocation=" + ", attempt=" + tries + " of " + maxAttempts +907 + " failed; retrying after sleep of " +908 + ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage()); +909 } +910 } else { +911 throw e; +912 } +913 // Only relocate the parent region if necessary +914 if(!(e instanceof RegionOfflineException || +915 e instanceof NoServerForRegionException)) { +916 relocateRegion(TableName.META_TABLE_NAME, metaStartKey, replicaId); +917 } +918 } finally { +919 userRegionLock.unlock(); +920 } +921 try{ +922 Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries)); +923 } catch (InterruptedException e) { +924 throw new InterruptedIOException("Giving up trying to location region in " + +925 "meta: thread is interrupted."); +926 } +927 } +928 } +929 +930 /** +931 * Put a newly discovered HRegionLocation into the cache. +932 * @param tableName The table name. +933 * @param location the new location +934 */ +935 @Override +936 public void cacheLocation(final TableName tableName, final RegionLocations location) { +937 metaCache.cacheLocation(tableName, location); +938 } +939 +940 /** +941 * Search the cache for a location that fits our table and row key. +942 * Return null if no suitable region is located. +943 * @return Null or region location found in cache. +944 */ +945 RegionLocations getCachedLocation(final TableName tableName, +946 final byte [] row) { +947 return metaCache.getCachedLocation(tableName, row); +948 } +949 +950 public void clearRegionCache(final TableName tableName, byte[] row) { +951 metaCache.clearCache(tableName, row); +952 } +953 +954 /* +955 * Delete all cached entries of a table that maps to a specific location. 
+956 */ +957 @Override +958 public void clearCaches(final ServerName serverName) { +959 metaCache.clearCache(serverName); +960 } +961 +962 @Override +963 public void clearRegionCache() { +964 metaCache.clearCache(); +965 } +966 +967 @Override +968 public void clearRegionCache(final TableName tableName) { +969 metaCache.clearCache(tableName); +970 } +971 +972 /** +973 * Put a newly discovered HRegionLocation into the cache. +974 * @param tableName The table name. +975 * @param source the source of the new location, if it's not coming from meta +976 * @param location the new location +977 */ +978 private void cacheLocation(final TableName tableName, final ServerName source, +979 final HRegionLocation location) { +980 metaCache.cacheLocation(tableName, source, location); +981 } 982 -983 /** -984 * State of the MasterService connection/setup. -985 */ -986 static class MasterServiceState { -987 Connection connection; -988 -989 MasterProtos.MasterService.BlockingInterface stub; -990 int userCount; +983 // Map keyed by service name + regionserver to service stub implementation +984 private final ConcurrentMap<String, Object> stubs = new ConcurrentHashMap<>(); +985 +986 /** +987 * State of the MasterService connection/setup. +988 */ +989 static class MasterServiceState { +990 Connection connection; 991 -992 MasterServiceState(final Connection connection) { -993 super(); -994 this.connection = connection; -995 } -996 -997 @Override -998 public String toString() { -999 return "MasterService"; -1000 } -1001 -1002 Object getStub() { -1003 return this.stub; -1004 } -1005 -1006 void clearStub() { -1007 this.stub = null; -1008 } -1009 -1010 boolean isMasterRunning() throws IOException { -1011 MasterProtos.IsMasterRunningResponse response = null; -1012 try { -1013 response = this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); -1014 } catch (Exception e) { -1015 throw ProtobufUtil.handleRemoteException(e); -1016 } -1017 return response != null? response.getIsMasterRunning(): false; -1018 } -1019 } -1020 -1021 /** -1022 * The record of errors for servers. -1023 */ -1024 static class ServerErrorTracker { -1025 // We need a concurrent map here, as we could have multiple threads updating it in parallel. -1026 private final ConcurrentMap<ServerName, ServerErrors> errorsByServer = new ConcurrentHashMap<>(); -1027 private final long canRetryUntil; -1028 private final int maxTries;// max number to try -1029 private final long startTrackingTime; -1030 -1031 /** -1032 * Constructor -1033 * @param timeout how long to wait before timeout, in unit of millisecond -1034 * @param maxTries how many times to try -1035 */ -1036 public ServerErrorTracker(long timeout, int maxTries) { -1037 this.maxTries = maxTries; -1038 this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; -1039 this.startTrackingTime = new Date().getTime(); -1040 } -1041 -1042 /** -1043 * We stop to retry when we have exhausted BOTH the number of tries and the time allocated. -1044 * @param numAttempt how many times we have tried by now -1045 */ -1046 boolean canTryMore(int numAttempt) { -1047 // If there is a single try we must not take into account the time. -1048 return numAttempt < maxTries || (maxTries > 1 && -1049 EnvironmentEdgeManager.currentTime() < this.canRetryUntil); -1050 } -1051 -1052 /** -1053 * Calculates the back-off time for a retrying request to a particular server. -1054 * -1055 * @param server The server in question. -1056 * @param basePause The default hci pause. 
-1057 * @return The time to wait before sending next request. -1058 */ -1059 long calculateBackoffTime(ServerName server, long basePause) { -1060 long result; -1061 ServerErrors errorStats = errorsByServer.get(server); -1062 if (errorStats != null) { -1063 result = ConnectionUtils.getPauseTime(basePause, Math.max(0, errorStats.getCount() - 1)); -1064 } else { -1065 result = 0; // yes, if the server is not in our list we don't wait before retrying. -1066 } -1067 return result; -1068 } -1069 -1070 /** -1071 * Reports that there was an error on the server to do whatever bean-counting necessary. -1072 * @param server The server in question. -1073 */ -1074 void reportServerError(ServerName server) { -1075 computeIfAbsent(errorsByServer, server, ServerErrors::new).addError(); -1076 } -1077 -1078 long getStartTrackingTime() { -1079 return startTrackingTime; -1080 } -1081 -1082 /** -1083 * The record of errors for a server. -1084 */ -1085 private static class ServerErrors { -1086 private final AtomicInteger retries = new AtomicInteger(0); -1087 -1088 public int getCount() { -1089 return retries.get(); -1090 } -1091 -1092 public void addError() { -1093 retries.incrementAndGet(); -1094 } -1095 } -1096 } -1097 -1098 /** -1099 * Class to make a MasterServiceStubMaker stub. -1100 */ -1101 private final class MasterServiceStubMaker { -1102 -1103 private void isMasterRunning(MasterProtos.MasterService.BlockingInterface stub) -1104 throws IOException { -1105 try { -1106 stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); -1107 } catch (ServiceException e) { -1108 throw ProtobufUtil.handleRemoteException(e); -1109 } -1110 } -1111 -1112 /** -1113 * Create a stub. Try once only. It is not typed because there is no common type to protobuf -1114 * services nor their interfaces. Let the caller do appropriate casting. -1115 * @return A stub for master services. -1116 */ -1117 private MasterProtos.MasterService.BlockingInterface makeStubNoRetries() -1118 throws IOException, KeeperException { -1119 ServerName sn = get(registry.getMasterAddress()); -1120 if (sn == null) { -1121 String msg = "ZooKeeper available but no active master location found"; -1122 LOG.info(msg); -1123 throw new MasterNotRunningException(msg); -1124 } -1125 if (isDeadServer(sn)) { -1126 throw new MasterNotRunningException(sn + " is dead."); +992 MasterProtos.MasterService.BlockingInterface stub; +993 int userCount; +994 +995 MasterServiceState(final Connection connection) { +996 super(); +997 this.connection = connection; +998 } +999 +1000 @Override +1001 public String toString() { +1002 return "MasterService"; +1003 } +1004 +1005 Object getStub() { +1006 return this.stub; +1007 } +1008 +1009 void clearStub() { +1010 this.stub = null; +1011 } +1012 +1013 boolean isMasterRunning() throws IOException { +1014 MasterProtos.IsMasterRunningResponse response = null; +1015 try { +1016 response = this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); +1017 } catch (Exception e) { +1018 throw ProtobufUtil.handleRemoteException(e); +1019 } +1020 return response != null? response.getIsMasterRunning(): false; +1021 } +1022 } +1023 +1024 /** +1025 * The record of errors for servers. +1026 */ +1027 static class ServerErrorTracker { +1028 // We need a concurrent map here, as we could have multiple threads updating it in parallel. 
+1029 private final ConcurrentMap<ServerName, ServerErrors> errorsByServer = new ConcurrentHashMap<>(); +1030 private final long canRetryUntil; +1031 private final int maxTries;// max number to try +1032 private final long startTrackingTime; +1033 +1034 /** +1035 * Constructor +1036 * @param timeout how long to wait before timeout, in unit of millisecond +1037 * @param maxTries how many times to try +1038 */ +1039 public ServerErrorTracker(long timeout, int maxTries) { +1040 this.maxTries = maxTries; +1041 this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; +1042 this.startTrackingTime = new Date().getTime(); +1043 } +1044 +1045 /** +1046 * We stop to retry when we have exhausted BOTH the number of tries and the time allocated. +1047 * @param numAttempt how many times we have tried by now +1048 */ +1049 boolean canTryMore(int numAttempt) { +1050 // If there is a single try we must not take into account the time. +1051 return numAttempt < maxTries || (maxTries > 1 && +1052 EnvironmentEdgeManager.currentTime() < this.canRetryUntil); +1053 } +1054 +1055 /** +1056 * Calculates the back-off time for a retrying request to a particular server. +1057 * +1058 * @param server The server in question. +1059 * @param basePause The default hci pause. +1060 * @return The time to wait before sending next request. +1061 */ +1062 long calculateBackoffTime(ServerName server, long basePause) { +1063 long result; +1064 ServerErrors errorStats = errorsByServer.get(server); +1065 if (errorStats != null) { +1066 result = ConnectionUtils.getPauseTime(basePause, Math.max(0, errorStats.getCount() - 1)); +1067 } else { +1068 result = 0; // yes, if the server is not in our list we don't wait before retrying. +1069 } +1070 return result; +1071 } +1072 +1073 /** +1074 * Reports that there was an error on the server to do whatever bean-counting necessary. +1075 * @param server The server in question. +1076 */ +1077 void reportServerError(ServerName server) { +1078 computeIfAbsent(errorsByServer, server, ServerErrors::new).addError(); +1079 } +1080 +1081 long getStartTrackingTime() { +1082 return startTrackingTime; +1083 } +1084 +1085 /** +1086 * The record of errors for a server. +1087 */ +1088 private static class ServerErrors { +1089 private final AtomicInteger retries = new AtomicInteger(0); +1090 +1091 public int getCount() { +1092 return retries.get(); +1093 } +1094 +1095 public void addError() { +1096 retries.incrementAndGet(); +1097 } +1098 } +1099 } +1100 +1101 /** +1102 * Class to make a MasterServiceStubMaker stub. +1103 */ +1104 private final class MasterServiceStubMaker { +1105 +1106 private void isMasterRunning(MasterProtos.MasterService.BlockingInterface stub) +1107 throws IOException { +1108 try { +1109 stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); +1110 } catch (ServiceException e) { +1111 throw ProtobufUtil.handleRemoteException(e); +1112 } +1113 } +1114 +1115 /** +1116 * Create a stub. Try once only. It is not typed because there is no common type to protobuf +1117 * services nor their interfaces. Let the caller do appropriate casting. +1118 * @return A stub for master services. +1119 */ +