From: misty@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Date: Tue, 21 Mar 2017 16:36:13 -0000
Message-Id: <4db36ec880c841269e4b2298275981df@git.apache.org>
Subject: [17/52] [partial] hbase-site git commit: Published site at 1cfd22bf43c9b64afae35d9bf16f764d0da80cab.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/client/ScanResultConsumer.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/ScanResultConsumer.html b/apidocs/src-html/org/apache/hadoop/hbase/client/ScanResultConsumer.html
index 7f2ddf1..c6e88dd 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/ScanResultConsumer.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/ScanResultConsumer.html
@@ -27,33 +27,42 @@
 019
 020import org.apache.hadoop.hbase.classification.InterfaceAudience;
 021import org.apache.hadoop.hbase.classification.InterfaceStability;
-022
-023/**
-024 * Receives {@link Result} for an asynchronous scan.
-025 */
-026@InterfaceAudience.Public
-027@InterfaceStability.Unstable
-028public interface ScanResultConsumer {
-029
-030 /**
-031 * @param result the data fetched from HBase service.
-032 * @return {@code false} if you want to terminate the scan process. Otherwise {@code true}
-033 */
-034 boolean onNext(Result result);
-035
-036 /**
-037 * Indicate that we hit an unrecoverable error and the scan operation is terminated.
-038 * <p>
-039 * We will not call {@link #onComplete()} after calling {@link #onError(Throwable)}.
-040 */
-041 void onError(Throwable error);
-042
-043 /**
-044 * Indicate that the scan operation is completed normally.
-045 */
-046 void onComplete();
-047
-048}
+022import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+023
+024/**
+025 * Receives {@link Result} for an asynchronous scan.
+026 */
+027@InterfaceAudience.Public
+028@InterfaceStability.Unstable
+029public interface ScanResultConsumer {
+030
+031 /**
+032 * @param result the data fetched from HBase service.
+033 * @return {@code false} if you want to terminate the scan process. Otherwise {@code true}
+034 */
+035 boolean onNext(Result result);
+036
+037 /**
+038 * Indicate that we hit an unrecoverable error and the scan operation is terminated.
+039 * <p>
+040 * We will not call {@link #onComplete()} after calling {@link #onError(Throwable)}.
+041 */
+042 void onError(Throwable error);
+043
+044 /**
+045 * Indicate that the scan operation is completed normally.
+046 */
+047 void onComplete();
+048
+049 /**
+050 * If {@code scan.isScanMetricsEnabled()} returns true, then this method will be called prior to
+051 * all other methods in this interface to give you the {@link ScanMetrics} instance for this scan
+052 * operation. The {@link ScanMetrics} instance will be updated on-the-fly during the scan, you can
+053 * store it somewhere to get the metrics at any time if you want.
+054 */
+055 default void onScanMetricsCreated(ScanMetrics scanMetrics) {
+056 }
+057}
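
The change above adds a default onScanMetricsCreated(ScanMetrics) callback to ScanResultConsumer. A minimal sketch of an implementation, assuming the consumer is handed to the asynchronous client's scan call; the CountingConsumer name and the counting/printing are illustrative, not part of this commit:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ScanResultConsumer;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

// Hypothetical implementation sketch; not part of the commit.
public class CountingConsumer implements ScanResultConsumer {

  private long rows;
  private volatile ScanMetrics metrics; // updated on the fly during the scan

  @Override
  public boolean onNext(Result result) {
    rows++;
    return true; // return false to terminate the scan early
  }

  @Override
  public void onError(Throwable error) {
    // terminal: onComplete() will not be called after onError()
    error.printStackTrace();
  }

  @Override
  public void onComplete() {
    System.out.println("scan done, rows=" + rows);
  }

  @Override
  public void onScanMetricsCreated(ScanMetrics scanMetrics) {
    // invoked before any other callback when scan metrics are enabled on the Scan
    this.metrics = scanMetrics;
  }
}

Because onScanMetricsCreated is a default method, existing implementations compile unchanged; they simply ignore the metrics.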
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/client/TableSnapshotScanner.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/TableSnapshotScanner.html b/apidocs/src-html/org/apache/hadoop/hbase/client/TableSnapshotScanner.html
index f4ced21..11f3dbd 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/TableSnapshotScanner.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/TableSnapshotScanner.html
@@ -135,7 +135,7 @@
 127 final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();
 128
 129 htd = meta.getTableDescriptor();
-130 regions = new ArrayList<HRegionInfo>(restoredRegions.size());
+130 regions = new ArrayList<>(restoredRegions.size());
 131 for (HRegionInfo hri: restoredRegions) {
 132 if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
 133 hri.getStartKey(), hri.getEndKey())) {
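
The only change in this hunk — repeated in several files below — is the Java 7 diamond operator: the explicit type arguments after new are dropped and inferred by the compiler, with identical runtime behavior. A standalone illustration (not from the commit):

import java.util.ArrayList;
import java.util.List;

public class DiamondSketch {
  public static void main(String[] args) {
    // Before: type arguments spelled out on both sides.
    List<String> explicit = new ArrayList<String>(4);
    // After: the compiler infers <String> from the declared type (Java 7+).
    List<String> inferred = new ArrayList<>(4);
    explicit.add("same");
    inferred.add("behavior");
    System.out.println(explicit + " " + inferred);
  }
}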
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
index b2f1221..b9503b7 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
@@ -26,49 +26,49 @@
 018 */
 019package org.apache.hadoop.hbase.client.replication;
 020
-021import com.google.common.annotations.VisibleForTesting;
-022import com.google.common.collect.Lists;
-023
-024import java.io.Closeable;
-025import java.io.IOException;
-026import java.util.ArrayList;
-027import java.util.Collection;
-028import java.util.HashMap;
-029import java.util.HashSet;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.TreeMap;
-033import java.util.Map.Entry;
-034import java.util.Set;
-035
-036import org.apache.commons.logging.Log;
-037import org.apache.commons.logging.LogFactory;
-038import org.apache.hadoop.conf.Configuration;
-039import org.apache.hadoop.hbase.Abortable;
-040import org.apache.hadoop.hbase.HColumnDescriptor;
-041import org.apache.hadoop.hbase.HConstants;
-042import org.apache.hadoop.hbase.HTableDescriptor;
-043import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-044import org.apache.hadoop.hbase.TableName;
-045import org.apache.hadoop.hbase.TableNotFoundException;
-046import org.apache.hadoop.hbase.classification.InterfaceAudience;
-047import org.apache.hadoop.hbase.classification.InterfaceStability;
-048import org.apache.hadoop.hbase.client.Admin;
-049import org.apache.hadoop.hbase.client.Connection;
-050import org.apache.hadoop.hbase.client.ConnectionFactory;
-051import org.apache.hadoop.hbase.client.HBaseAdmin;
-052import org.apache.hadoop.hbase.client.RegionLocator;
-053import org.apache.hadoop.hbase.replication.ReplicationException;
-054import org.apache.hadoop.hbase.replication.ReplicationFactory;
-055import org.apache.hadoop.hbase.replication.ReplicationPeer;
-056import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-057import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-058import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
-059import org.apache.hadoop.hbase.replication.ReplicationPeers;
-060import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-061import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
-062import org.apache.hadoop.hbase.util.Pair;
-063import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+021import java.io.Closeable;
+022import java.io.IOException;
+023import java.util.ArrayList;
+024import java.util.Collection;
+025import java.util.HashMap;
+026import java.util.HashSet;
+027import java.util.List;
+028import java.util.Map;
+029import java.util.Map.Entry;
+030import java.util.Set;
+031import java.util.TreeMap;
+032
+033import org.apache.commons.logging.Log;
+034import org.apache.commons.logging.LogFactory;
+035import org.apache.hadoop.conf.Configuration;
+036import org.apache.hadoop.hbase.Abortable;
+037import org.apache.hadoop.hbase.HColumnDescriptor;
+038import org.apache.hadoop.hbase.HConstants;
+039import org.apache.hadoop.hbase.HTableDescriptor;
+040import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
+041import org.apache.hadoop.hbase.TableName;
+042import org.apache.hadoop.hbase.TableNotFoundException;
+043import org.apache.hadoop.hbase.classification.InterfaceAudience;
+044import org.apache.hadoop.hbase.classification.InterfaceStability;
+045import org.apache.hadoop.hbase.client.Admin;
+046import org.apache.hadoop.hbase.client.Connection;
+047import org.apache.hadoop.hbase.client.ConnectionFactory;
+048import org.apache.hadoop.hbase.client.HBaseAdmin;
+049import org.apache.hadoop.hbase.client.RegionLocator;
+050import org.apache.hadoop.hbase.replication.ReplicationException;
+051import org.apache.hadoop.hbase.replication.ReplicationFactory;
+052import org.apache.hadoop.hbase.replication.ReplicationPeer;
+053import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+054import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+055import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
+056import org.apache.hadoop.hbase.replication.ReplicationPeers;
+057import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
+058import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+059import org.apache.hadoop.hbase.util.Pair;
+060import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+061
+062import com.google.common.annotations.VisibleForTesting;
+063import com.google.common.collect.Lists;
 064
 065/**
 066 * <p>
@@ -281,7 +281,7 @@
 273 @Deprecated
 274 public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
 275 List<ReplicationPeerDescription> peers = this.admin.listReplicationPeers();
-276 Map<String, ReplicationPeerConfig> result = new TreeMap<String, ReplicationPeerConfig>();
+276 Map<String, ReplicationPeerConfig> result = new TreeMap<>();
 277 for (ReplicationPeerDescription peer : peers) {
 278 result.put(peer.getPeerId(), peer.getPeerConfig());
 279 }
@@ -351,7 +351,7 @@
 343 if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
 344 preTableCfs.put(table, null);
 345 } else {
-346 Set<String> cfSet = new HashSet<String>(cfs);
+346 Set<String> cfSet = new HashSet<>(cfs);
 347 cfSet.addAll(appendCfs);
 348 preTableCfs.put(table, Lists.newArrayList(cfSet));
 349 }
@@ -408,7 +408,7 @@
 400 if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
 401 preTableCfs.remove(table);
 402 } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
-403 Set<String> cfSet = new HashSet<String>(cfs);
+403 Set<String> cfSet = new HashSet<>(cfs);
 404 cfSet.removeAll(removeCfs);
 405 if (cfSet.isEmpty()) {
 406 preTableCfs.remove(table);
@@ -492,7 +492,7 @@
 484 tableCFs.getColumnFamilyMap()
 485 .forEach(
 486 (cf, scope) -> {
-487 HashMap<String, String> replicationEntry = new HashMap<String, String>();
+487 HashMap<String, String> replicationEntry = new HashMap<>();
 488 replicationEntry.put(TNAME, table);
 489 replicationEntry.put(CFNAME, cf);
 490 replicationEntry.put(REPLICATIONTYPE,
@@ -529,66 +529,71 @@
 521 }
 522
 523 @VisibleForTesting
-524 public void peerAdded(String id) throws ReplicationException {
-525 this.replicationPeers.peerConnected(id);
-526 }
-527
-528 @VisibleForTesting
-529 List<ReplicationPeer> listReplicationPeers() throws IOException {
-530 Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
-531 if (peers == null || peers.size() <= 0) {
-532 return null;
-533 }
-534 List<ReplicationPeer> listOfPeers = new ArrayList<ReplicationPeer>(peers.size());
-535 for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
-536 String peerId = peerEntry.getKey();
-537 try {
-538 Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf(peerId);
-539 Configuration peerConf = pair.getSecond();
-540 ReplicationPeer peer = new ReplicationPeerZKImpl(zkw, pair.getSecond(),
-541 peerId, pair.getFirst(), this.connection);
-542 listOfPeers.add(peer);
-543 } catch (ReplicationException e) {
-544 LOG.warn("Failed to get valid replication peers. "
-545 + "Error connecting to peer cluster with peerId=" + peerId + ". Error message="
-546 + e.getMessage());
-547 LOG.debug("Failure details to get valid replication peers.", e);
-548 continue;
-549 }
-550 }
-551 return listOfPeers;
-552 }
-553
-554 /**
-555 * Set a namespace in the peer config means that all tables in this namespace
-556 * will be replicated to the peer cluster.
-557 *
-558 * 1. If you already have set a namespace in the peer config, then you can't set any table
-559 * of this namespace to the peer config.
-560 * 2. If you already have set a table in the peer config, then you can't set this table's
-561 * namespace to the peer config.
+524 @Deprecated
+525 public void peerAdded(String id) throws ReplicationException {
+526 this.replicationPeers.peerConnected(id);
+527 }
+528
+529 /**
+530 * @deprecated use {@link org.apache.hadoop.hbase.client.Admin#listReplicationPeers()} instead
+531 */
+532 @VisibleForTesting
+533 @Deprecated
+534 List<ReplicationPeer> listReplicationPeers() throws IOException {
+535 Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
+536 if (peers == null || peers.size() <= 0) {
+537 return null;
+538 }
+539 List<ReplicationPeer> listOfPeers = new ArrayList<>(peers.size());
+540 for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
+541 String peerId = peerEntry.getKey();
+542 try {
+543 Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf(peerId);
+544 Configuration peerConf = pair.getSecond();
+545 ReplicationPeer peer = new ReplicationPeerZKImpl(zkw, pair.getSecond(),
+546 peerId, pair.getFirst(), this.connection);
+547 listOfPeers.add(peer);
+548 } catch (ReplicationException e) {
+549 LOG.warn("Failed to get valid replication peers. "
+550 + "Error connecting to peer cluster with peerId=" + peerId + ". Error message="
+551 + e.getMessage());
+552 LOG.debug("Failure details to get valid replication peers.", e);
+553 continue;
+554 }
+555 }
+556 return listOfPeers;
+557 }
+558
+559 /**
+560 * Set a namespace in the peer config means that all tables in this namespace
+561 * will be replicated to the peer cluster.
 562 *
-563 * @param namespaces
-564 * @param tableCfs
-565 * @throws ReplicationException
-566 */
-567 private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
-568 Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
-569 if (namespaces == null || namespaces.isEmpty()) {
-570 return;
-571 }
-572 if (tableCfs == null || tableCfs.isEmpty()) {
-573 return;
-574 }
-575 for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
-576 TableName table = entry.getKey();
-577 if (namespaces.contains(table.getNamespaceAsString())) {
-578 throw new ReplicationException(
-579 "Table-cfs config conflict with namespaces config in peer");
-580 }
-581 }
-582 }
-583}
+563 * 1. If you already have set a namespace in the peer config, then you can't set any table
+564 * of this namespace to the peer config.
+565 * 2. If you already have set a table in the peer config, then you can't set this table's
+566 * namespace to the peer config.
+567 *
+568 * @param namespaces
+569 * @param tableCfs
+570 * @throws ReplicationException
+571 */
+572 private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
+573 Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
+574 if (namespaces == null || namespaces.isEmpty()) {
+575 return;
+576 }
+577 if (tableCfs == null || tableCfs.isEmpty()) {
+578 return;
+579 }
+580 for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+581 TableName table = entry.getKey();
+582 if (namespaces.contains(table.getNamespaceAsString())) {
+583 throw new ReplicationException(
+584 "Table-cfs config conflict with namespaces config in peer");
+585 }
+586 }
+587 }
+588}
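
The ReplicationAdmin changes above deprecate peerAdded and listReplicationPeers in favor of the Admin API; the rewritten listPeerConfigs already delegates to Admin#listReplicationPeers(). A hedged sketch of the replacement path, with the usual connection boilerplate assumed:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListPeersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The non-deprecated path that listPeerConfigs() above now delegates to.
      List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
      for (ReplicationPeerDescription peer : peers) {
        System.out.println(peer.getPeerId() + " -> " + peer.getPeerConfig());
      }
    }
  }
}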
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/errorhandling/ForeignException.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/errorhandling/ForeignException.html b/apidocs/src-html/org/apache/hadoop/hbase/errorhandling/ForeignException.html
index ab67bc0..8540fe3 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/errorhandling/ForeignException.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/errorhandling/ForeignException.html
@@ -115,92 +115,91 @@
 107 // if there is no stack trace, ignore it and just return the message
 108 if (trace == null) return null;
 109 // build the stack trace for the message
-110 List<StackTraceElementMessage> pbTrace =
-111 new ArrayList<StackTraceElementMessage>(trace.length);
-112 for (StackTraceElement elem : trace) {
-113 StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
-114 stackBuilder.setDeclaringClass(elem.getClassName());
-115 stackBuilder.setFileName(elem.getFileName());
-116 stackBuilder.setLineNumber(elem.getLineNumber());
-117 stackBuilder.setMethodName(elem.getMethodName());
-118 pbTrace.add(stackBuilder.build());
-119 }
-120 return pbTrace;
-121 }
-122
-123 /**
-124 * This is a Proxy Throwable that contains the information of the original remote exception
-125 */
-126 private static class ProxyThrowable extends Throwable {
-127 ProxyThrowable(String msg, StackTraceElement[] trace) {
-128 super(msg);
-129 this.setStackTrace(trace);
-130 }
-131 }
-132
-133 /**
-134 * Converts a ForeignException to an array of bytes.
-135 * @param source the name of the external exception source
-136 * @param t the "local" external exception (local)
-137 * @return protobuf serialized version of ForeignException
-138 */
-139 public static byte[] serialize(String source, Throwable t) {
-140 GenericExceptionMessage.Builder gemBuilder = GenericExceptionMessage.newBuilder();
-141 gemBuilder.setClassName(t.getClass().getName());
-142 if (t.getMessage() != null) {
-143 gemBuilder.setMessage(t.getMessage());
-144 }
-145 // set the stack trace, if there is one
-146 List<StackTraceElementMessage> stack =
-147 ForeignException.toStackTraceElementMessages(t.getStackTrace());
-148 if (stack != null) {
-149 gemBuilder.addAllTrace(stack);
-150 }
-151 GenericExceptionMessage payload = gemBuilder.build();
-152 ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder();
-153 exception.setGenericException(payload).setSource(source);
-154 ForeignExceptionMessage eem = exception.build();
-155 return eem.toByteArray();
-156 }
-157
-158 /**
-159 * Takes a series of bytes and tries to generate an ForeignException instance for it.
-160 * @param bytes
-161 * @return the ForeignExcpetion instance
-162 * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
-163 * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
-164 */
-165 public static ForeignException deserialize(byte[] bytes)
-166 throws IOException {
-167 // figure out the data we need to pass
-168 ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes);
-169 GenericExceptionMessage gem = eem.getGenericException();
-170 StackTraceElement [] trace = ForeignException.toStackTrace(gem.getTraceList());
-171 ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace);
-172 ForeignException e = new ForeignException(eem.getSource(), dfe);
-173 return e;
-174 }
-175
-176 /**
-177 * Unwind a serialized array of {@link StackTraceElementMessage}s to a
-178 * {@link StackTraceElement}s.
-179 * @param traceList list that was serialized
-180 * @return the deserialized list or <tt>null</tt> if it couldn't be unwound (e.g. wasn't set on
-181 * the sender).
-182 */
-183 private static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
-184 if (traceList == null || traceList.isEmpty()) {
-185 return new StackTraceElement[0]; // empty array
-186 }
-187 StackTraceElement[] trace = new StackTraceElement[traceList.size()];
-188 for (int i = 0; i < traceList.size(); i++) {
-189 StackTraceElementMessage elem = traceList.get(i);
-190 trace[i] = new StackTraceElement(
-191 elem.getDeclaringClass(), elem.getMethodName(), elem.getFileName(), elem.getLineNumber());
-192 }
-193 return trace;
-194 }
-195}
+110 List<StackTraceElementMessage> pbTrace = new ArrayList<>(trace.length);
+111 for (StackTraceElement elem : trace) {
+112 StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
+113 stackBuilder.setDeclaringClass(elem.getClassName());
+114 stackBuilder.setFileName(elem.getFileName());
+115 stackBuilder.setLineNumber(elem.getLineNumber());
+116 stackBuilder.setMethodName(elem.getMethodName());
+117 pbTrace.add(stackBuilder.build());
+118 }
+119 return pbTrace;
+120 }
+121
+122 /**
+123 * This is a Proxy Throwable that contains the information of the original remote exception
+124 */
+125 private static class ProxyThrowable extends Throwable {
+126 ProxyThrowable(String msg, StackTraceElement[] trace) {
+127 super(msg);
+128 this.setStackTrace(trace);
+129 }
+130 }
+131
+132 /**
+133 * Converts a ForeignException to an array of bytes.
+134 * @param source the name of the external exception source
+135 * @param t the "local" external exception (local)
+136 * @return protobuf serialized version of ForeignException
+137 */
+138 public static byte[] serialize(String source, Throwable t) {
+139 GenericExceptionMessage.Builder gemBuilder = GenericExceptionMessage.newBuilder();
+140 gemBuilder.setClassName(t.getClass().getName());
+141 if (t.getMessage() != null) {
+142 gemBuilder.setMessage(t.getMessage());
+143 }
+144 // set the stack trace, if there is one
+145 List<StackTraceElementMessage> stack =
+146 ForeignException.toStackTraceElementMessages(t.getStackTrace());
+147 if (stack != null) {
+148 gemBuilder.addAllTrace(stack);
+149 }
+150 GenericExceptionMessage payload = gemBuilder.build();
+151 ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder();
+152 exception.setGenericException(payload).setSource(source);
+153 ForeignExceptionMessage eem = exception.build();
+154 return eem.toByteArray();
+155 }
+156
+157 /**
+158 * Takes a series of bytes and tries to generate an ForeignException instance for it.
+159 * @param bytes
+160 * @return the ForeignExcpetion instance
+161 * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
+162 * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
+163 */
+164 public static ForeignException deserialize(byte[] bytes)
+165 throws IOException {
+166 // figure out the data we need to pass
+167 ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes);
+168 GenericExceptionMessage gem = eem.getGenericException();
+169 StackTraceElement [] trace = ForeignException.toStackTrace(gem.getTraceList());
+170 ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace);
+171 ForeignException e = new ForeignException(eem.getSource(), dfe);
+172 return e;
+173 }
+174
+175 /**
+176 * Unwind a serialized array of {@link StackTraceElementMessage}s to a
+177 * {@link StackTraceElement}s.
+178 * @param traceList list that was serialized
+179 * @return the deserialized list or <tt>null</tt> if it couldn't be unwound (e.g. wasn't set on
+180 * the sender).
+181 */
+182 private static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
+183 if (traceList == null || traceList.isEmpty()) {
+184 return new StackTraceElement[0]; // empty array
+185 }
+186 StackTraceElement[] trace = new StackTraceElement[traceList.size()];
+187 for (int i = 0; i < traceList.size(); i++) {
+188 StackTraceElementMessage elem = traceList.get(i);
+189 trace[i] = new StackTraceElement(
+190 elem.getDeclaringClass(), elem.getMethodName(), elem.getFileName(), elem.getLineNumber());
+191 }
+192 return trace;
+193 }
+194}
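
serialize(String, Throwable) and deserialize(byte[]) above are both public static, so a round trip looks roughly like this sketch; the source name, the exception, and the printing are invented for illustration:

import java.io.IOException;

import org.apache.hadoop.hbase.errorhandling.ForeignException;

public class ForeignExceptionRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize a local Throwable on behalf of a named source member...
    byte[] bytes = ForeignException.serialize("member-1", new IllegalStateException("boom"));
    // ...and rebuild it elsewhere; the cause is the ProxyThrowable shown above,
    // carrying the original message and stack trace.
    ForeignException remote = ForeignException.deserialize(bytes);
    System.out.println(remote + " caused by " + remote.getCause().getMessage());
  }
}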
+159 * @param bytes
+160 * @return the ForeignExcpetion instance
+161 * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
+162 * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
+163 */
+164 public static ForeignException deserialize(byte[] bytes)
+165 throws IOException {
+166 // figure out the data we need to pass
+167 ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes);
+168 GenericExceptionMessage gem = eem.getGenericException();
+169 StackTraceElement [] trace = ForeignException.toStackTrace(gem.getTraceList());
+170 ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace);
+171 ForeignException e = new ForeignException(eem.getSource(), dfe);
+172 return e;
+173 }
+174
+175 /**
+176 * Unwind a serialized array of {@link StackTraceElementMessage}s to a
+177 * {@link StackTraceElement}s.
+178 * @param traceList list that was serialized
+179 * @return the deserialized list or <tt>null</tt> if it couldn't be unwound (e.g. wasn't set on
+180 * the sender).
+181 */
+182 private static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
+183 if (traceList == null || traceList.isEmpty()) {
+184 return new StackTraceElement[0]; // empty array
+185 }
+186 StackTraceElement[] trace = new StackTraceElement[traceList.size()];
+187 for (int i = 0; i < traceList.size(); i++) {
+188 StackTraceElementMessage elem = traceList.get(i);
+189 trace[i] = new StackTraceElement(
+190 elem.getDeclaringClass(), elem.getMethodName(), elem.getFileName(), elem.getLineNumber());
+191 }
+192 return trace;
+193 }
+194}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
index 52dbaf6..bb7c162 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
@@ -185,7 +185,7 @@
 177 " can only be used with EQUAL and NOT_EQUAL");
 178 }
 179 }
-180 ArrayList<Object> arguments = new ArrayList<Object>(2);
+180 ArrayList<Object> arguments = new ArrayList<>(2);
 181 arguments.add(compareOp);
 182 arguments.add(comparator);
 183 return arguments;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
index 52dbaf6..bb7c162 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
@@ -185,7 +185,7 @@
 177 " can only be used with EQUAL and NOT_EQUAL");
 178 }
 179 }
-180 ArrayList<Object> arguments = new ArrayList<Object>(2);
+180 ArrayList<Object> arguments = new ArrayList<>(2);
 181 arguments.add(compareOp);
 182 arguments.add(comparator);
 183 return arguments;
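
extractArguments above pairs a CompareOp with a comparator, which is the shape every CompareFilter subclass is constructed from. A typical construction, using RowFilter as one such subclass (the row key is an arbitrary example):

import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareFilterSketch {
  public static void main(String[] args) {
    // A CompareFilter subclass takes the (compareOp, comparator) pair
    // that extractArguments() parses out of a filter expression.
    Filter f = new RowFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("row-1")));
    System.out.println(f);
  }
}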
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
index ef89262..68f3990 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
@@ -62,7 +62,7 @@
 054 protected byte[] columnQualifier;
 055 protected boolean dropDependentColumn;
 056
-057 protected Set<Long> stampSet = new HashSet<Long>();
+057 protected Set<Long> stampSet = new HashSet<>();
 058
 059 /**
 060 * Build a dependent column filter with value checking

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
index a7878d7..99bb715 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
@@ -461,7 +461,7 @@
 453 throw new DeserializationException(e);
 454 }
 455
-456 List<Filter> rowFilters = new ArrayList<Filter>(proto.getFiltersCount());
+456 List<Filter> rowFilters = new ArrayList<>(proto.getFiltersCount());
 457 try {
 458 List<FilterProtos.Filter> filtersList = proto.getFiltersList();
 459 int listSize = filtersList.size();

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
index a7878d7..99bb715 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
@@ -461,7 +461,7 @@
 453 throw new DeserializationException(e);
 454 }
 455
-456 List<Filter> rowFilters = new ArrayList<Filter>(proto.getFiltersCount());
+456 List<Filter> rowFilters = new ArrayList<>(proto.getFiltersCount());
 457 try {
 458 List<FilterProtos.Filter> filtersList = proto.getFiltersList();
 459 int listSize = filtersList.size();
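
The FilterList deserialization above rebuilds the list from its operator and member filters; building one directly mirrors that shape. A small sketch, with arbitrary member filters:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

public class FilterListSketch {
  public static void main(String[] args) {
    // Mirrors the parseFrom code above: collect filters into a sized list...
    List<Filter> rowFilters = new ArrayList<>(2);
    rowFilters.add(new FirstKeyOnlyFilter());
    rowFilters.add(new KeyOnlyFilter());
    // ...then combine them; MUST_PASS_ALL ANDs the filters, MUST_PASS_ONE ORs them.
    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL, rowFilters);
    System.out.println(list);
  }
}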
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/22cff34f/apidocs/src-html/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.html
index 6548544..b1d4056 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.html
@@ -49,7 +49,7 @@
 041 * Note : It may emit KVs which do not have the given columns in them, if
 042 * these KVs happen to occur before a KV which does have a match. Given this
 043 * caveat, this filter is only useful for special cases
-044 * like {@link org.apache.hadoop.hbase.mapreduce.RowCounter}.
+044 * like org.apache.hadoop.hbase.mapreduce.RowCounter.
 045 * <p>
 046 * @deprecated Deprecated in 2.0. See HBASE-13347
 047 */
@@ -116,7 +116,7 @@
 108 throw new DeserializationException(e);
 109 }
 110
-111 TreeSet<byte []> qualifiers = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
+111 TreeSet<byte []> qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR);
 112 for (ByteString qualifier : proto.getQualifiersList()) {
 113 qualifiers.add(qualifier.toByteArray());
 114 }
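
For completeness, constructing the filter whose parseFrom is shown above. The class is deprecated (HBASE-13347), so this only illustrates the documented API, not a recommendation; qualifier values are arbitrary:

import java.util.TreeSet;

import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class QualifiersFilterSketch {
  public static void main(String[] args) {
    // Qualifiers live in a byte[]-ordered TreeSet, exactly as parseFrom() builds one.
    TreeSet<byte[]> qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    qualifiers.add(Bytes.toBytes("q1"));
    qualifiers.add(Bytes.toBytes("q2"));
    Filter filter = new FirstKeyValueMatchingQualifiersFilter(qualifiers);
    System.out.println(filter);
  }
}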