Subject: svn commit: r1478639 [5/10] - in /hbase/branches/0.95: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java...
Date: Fri, 03 May 2013 03:58:35 -0000
To: commits@hbase.apache.org
From: stack@apache.org

Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java (added)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java Fri May 3 03:58:33 2013
@@ -0,0 +1,1591 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.ipc; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import javax.net.SocketFactory; +import javax.security.sasl.SaslException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.codec.KeyValueCodec; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; +import org.apache.hadoop.hbase.protobuf.generated.Tracing.RPCTInfo; +import org.apache.hadoop.hbase.security.AuthMethod; +import org.apache.hadoop.hbase.security.HBaseSaslRpcClient; +import org.apache.hadoop.hbase.security.SecurityInfo; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; +import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.PoolMap; +import org.apache.hadoop.hbase.util.PoolMap.PoolType; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.security.token.TokenSelector; +import org.cloudera.htrace.Span; +import org.cloudera.htrace.Trace; + +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.Message.Builder; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +import com.google.protobuf.TextFormat; + + +/** + * Does RPC against a cluster. 
Manages connections per regionserver in the cluster. + *
<p>
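+ * <p>A minimal usage sketch (illustrative, not part of this commit; {@code conf},
+ * {@code clusterId}, {@code serverName} and {@code rpcTimeout} are assumed to be
+ * in scope, and the generated stub class named below is only an example):
+ * <pre>
+ * RpcClient client = new RpcClient(conf, clusterId);
+ * try {
+ *   BlockingRpcChannel channel =
+ *     client.createBlockingRpcChannel(serverName, User.getCurrent(), rpcTimeout);
+ *   // Layer any generated protobuf blocking stub over the channel, e.g.:
+ *   // ClientService.BlockingInterface stub = ClientService.newBlockingStub(channel);
+ * } finally {
+ *   client.stop();
+ * }
+ * </pre>
+ * <p>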
See HBaseServer + */ +@InterfaceAudience.Private +public class RpcClient { + // The LOG key is intentionally not from this package to avoid ipc logging at DEBUG (all under + // o.a.h.hbase is set to DEBUG as default). + public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.RpcClient"); + protected final PoolMap connections; + + protected int counter; // counter for call ids + protected final AtomicBoolean running = new AtomicBoolean(true); // if client runs + final protected Configuration conf; + final protected int maxIdleTime; // connections will be culled if it was idle for + // maxIdleTime microsecs + final protected int maxRetries; //the max. no. of retries for socket connections + final protected long failureSleep; // Time to sleep before retry on failure. + protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm + protected final boolean tcpKeepAlive; // if T then use keepalives + protected int pingInterval; // how often sends ping to the server in msecs + protected FailedServers failedServers; + private final Codec codec; + private final CompressionCodec compressor; + private final IPCUtil ipcUtil; + + protected final SocketFactory socketFactory; // how to create sockets + protected String clusterId; + + final private static String PING_INTERVAL_NAME = "ipc.ping.interval"; + final private static String SOCKET_TIMEOUT = "ipc.socket.timeout"; + final static int DEFAULT_PING_INTERVAL = 60000; // 1 min + final static int DEFAULT_SOCKET_TIMEOUT = 20000; // 20 seconds + final static int PING_CALL_ID = -1; + + public final static String FAILED_SERVER_EXPIRY_KEY = "hbase.ipc.client.failed.servers.expiry"; + public final static int FAILED_SERVER_EXPIRY_DEFAULT = 2000; + + // thread-specific RPC timeout, which may override that of what was passed in. + // TODO: Verify still being used. + private static ThreadLocal rpcTimeout = new ThreadLocal() { + @Override + protected Integer initialValue() { + return HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT; + } + }; + + /** + * A class to manage a list of servers that failed recently. + */ + static class FailedServers { + private final LinkedList> failedServers = new + LinkedList>(); + private final int recheckServersTimeout; + + FailedServers(Configuration conf) { + this.recheckServersTimeout = conf.getInt( + FAILED_SERVER_EXPIRY_KEY, FAILED_SERVER_EXPIRY_DEFAULT); + } + + /** + * Add an address to the list of the failed servers list. + */ + public synchronized void addToFailedServers(InetSocketAddress address) { + final long expiry = EnvironmentEdgeManager.currentTimeMillis() + recheckServersTimeout; + failedServers.addFirst(new Pair(expiry, address.toString())); + } + + /** + * Check if the server should be considered as bad. Clean the old entries of the list. 
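+ * Entries older than hbase.ipc.client.failed.servers.expiry (default 2000 ms)
+ * are dropped as a side effect of the lookup.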
+ * + * @return true if the server is in the failed servers list + */ + public synchronized boolean isFailedServer(final InetSocketAddress address) { + if (failedServers.isEmpty()) { + return false; + } + + final String lookup = address.toString(); + final long now = EnvironmentEdgeManager.currentTimeMillis(); + + // iterate, looking for the search entry and cleaning expired entries + Iterator> it = failedServers.iterator(); + while (it.hasNext()) { + Pair cur = it.next(); + if (cur.getFirst() < now) { + it.remove(); + } else { + if (lookup.equals(cur.getSecond())) { + return true; + } + } + } + + return false; + } + } + + @SuppressWarnings("serial") + public static class FailedServerException extends IOException { + public FailedServerException(String s) { + super(s); + } + } + + /** + * set the ping interval value in configuration + * + * @param conf Configuration + * @param pingInterval the ping interval + */ + // Any reason we couldn't just do tcp keepalive instead of this pingery? + // St.Ack 20130121 + public static void setPingInterval(Configuration conf, int pingInterval) { + conf.setInt(PING_INTERVAL_NAME, pingInterval); + } + + /** + * Get the ping interval from configuration; + * If not set in the configuration, return the default value. + * + * @param conf Configuration + * @return the ping interval + */ + static int getPingInterval(Configuration conf) { + return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL); + } + + /** + * Set the socket timeout + * @param conf Configuration + * @param socketTimeout the socket timeout + */ + public static void setSocketTimeout(Configuration conf, int socketTimeout) { + conf.setInt(SOCKET_TIMEOUT, socketTimeout); + } + + /** + * @return the socket timeout + */ + static int getSocketTimeout(Configuration conf) { + return conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT); + } + + /** A call waiting for a value. */ + protected class Call { + final int id; // call id + final Message param; // rpc request method param object + /** + * Optionally has cells when making call. Optionally has cells set on response. Used + * passing cells to the rpc and receiving the response. + */ + CellScanner cells; + Message response; // value, null if error + // The return type. Used to create shell into which we deserialize the response if any. + Message responseDefaultType; + IOException error; // exception, null if value + boolean done; // true when call is done + long startTime; + final MethodDescriptor md; + + protected Call(final MethodDescriptor md, Message param, final CellScanner cells, + final Message responseDefaultType) { + this.param = param; + this.md = md; + this.cells = cells; + this.startTime = System.currentTimeMillis(); + this.responseDefaultType = responseDefaultType; + synchronized (RpcClient.this) { + this.id = counter++; + } + } + + @Override + public String toString() { + return "callId: " + this.id + " methodName: " + this.md.getName() + " param {" + + (this.param != null? TextFormat.shortDebugString(this.param): "") + "}"; + } + + /** Indicate when the call is complete and the + * value or error are available. Notifies by default. */ + protected synchronized void callComplete() { + this.done = true; + notify(); // notify caller + } + + /** Set the exception when there is an error. + * Notify the caller the call is done. 
+ * + * @param error exception thrown by the call; either local or remote + */ + public void setException(IOException error) { + this.error = error; + callComplete(); + } + + /** + * Set the return value when there is no error. + * Notify the caller the call is done. + * + * @param response return value of the call. + * @param cells Can be null + */ + public void setResponse(Message response, final CellScanner cells) { + this.response = response; + this.cells = cells; + callComplete(); + } + + public long getStartTime() { + return this.startTime; + } + } + + protected final static Map> tokenHandlers = + new HashMap>(); + static { + tokenHandlers.put(AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN, + new AuthenticationTokenSelector()); + } + + /** + * Creates a connection. Can be overridden by a subclass for testing. + * @param remoteId - the ConnectionId to use for the connection creation. + */ + protected Connection createConnection(ConnectionId remoteId, final Codec codec, + final CompressionCodec compressor) + throws IOException { + return new Connection(remoteId, codec, compressor); + } + + /** Thread that reads responses and notifies callers. Each connection owns a + * socket connected to a remote address. Calls are multiplexed through this + * socket: responses may be delivered out of order. */ + protected class Connection extends Thread { + private ConnectionHeader header; // connection header + protected ConnectionId remoteId; + protected Socket socket = null; // connected socket + protected DataInputStream in; + protected DataOutputStream out; + private InetSocketAddress server; // server ip:port + private String serverPrincipal; // server's krb5 principal name + private AuthMethod authMethod; // authentication method + private boolean useSasl; + private Token token; + private HBaseSaslRpcClient saslRpcClient; + private int reloginMaxBackoff; // max pause before relogin on sasl failure + private final Codec codec; + private final CompressionCodec compressor; + + // currently active calls + protected final ConcurrentSkipListMap calls = + new ConcurrentSkipListMap(); + protected final AtomicLong lastActivity = + new AtomicLong(); // last I/O activity time + protected final AtomicBoolean shouldCloseConnection = + new AtomicBoolean(); // indicate if the connection is closed + protected IOException closeException; // close reason + + Connection(ConnectionId remoteId, final Codec codec, final CompressionCodec compressor) + throws IOException { + if (remoteId.getAddress().isUnresolved()) { + throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName()); + } + this.server = remoteId.getAddress(); + this.codec = codec; + this.compressor = compressor; + + UserGroupInformation ticket = remoteId.getTicket().getUGI(); + SecurityInfo securityInfo = SecurityInfo.getInfo(remoteId.getServiceName()); + this.useSasl = User.isHBaseSecurityEnabled(conf); + if (useSasl && securityInfo != null) { + AuthenticationProtos.TokenIdentifier.Kind tokenKind = securityInfo.getTokenKind(); + if (tokenKind != null) { + TokenSelector tokenSelector = + tokenHandlers.get(tokenKind); + if (tokenSelector != null) { + token = tokenSelector.selectToken(new Text(clusterId), + ticket.getTokens()); + } else if (LOG.isDebugEnabled()) { + LOG.debug("No token selector found for type "+tokenKind); + } + } + String serverKey = securityInfo.getServerPrincipal(); + if (serverKey == null) { + throw new IOException( + "Can't obtain server Kerberos config key from SecurityInfo"); + } + 
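// Expand the Hadoop _HOST placeholder in the configured principal to this server's
+ // canonical hostname (lower-cased to match how principals are commonly issued).
+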
serverPrincipal = SecurityUtil.getServerPrincipal( + conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase()); + if (LOG.isDebugEnabled()) { + LOG.debug("RPC Server Kerberos principal name for service=" + + remoteId.getServiceName() + " is " + serverPrincipal); + } + } + + if (!useSasl) { + authMethod = AuthMethod.SIMPLE; + } else if (token != null) { + authMethod = AuthMethod.DIGEST; + } else { + authMethod = AuthMethod.KERBEROS; + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Use " + authMethod + " authentication for service " + remoteId.serviceName + + ", sasl=" + useSasl); + } + reloginMaxBackoff = conf.getInt("hbase.security.relogin.maxbackoff", 5000); + this.remoteId = remoteId; + + ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); + builder.setServiceName(remoteId.getServiceName()); + UserInformation userInfoPB; + if ((userInfoPB = getUserInfo(ticket)) != null) { + builder.setUserInfo(userInfoPB); + } + builder.setCellBlockCodecClass(this.codec.getClass().getCanonicalName()); + if (this.compressor != null) { + builder.setCellBlockCompressorClass(this.compressor.getClass().getCanonicalName()); + } + this.header = builder.build(); + + this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " + + remoteId.getAddress().toString() + + ((ticket==null)?" from an unknown user": (" from " + + ticket.getUserName()))); + this.setDaemon(true); + } + + private UserInformation getUserInfo(UserGroupInformation ugi) { + if (ugi == null || authMethod == AuthMethod.DIGEST) { + // Don't send user for token auth + return null; + } + UserInformation.Builder userInfoPB = UserInformation.newBuilder(); + if (authMethod == AuthMethod.KERBEROS) { + // Send effective user for Kerberos auth + userInfoPB.setEffectiveUser(ugi.getUserName()); + } else if (authMethod == AuthMethod.SIMPLE) { + //Send both effective user and real user for simple auth + userInfoPB.setEffectiveUser(ugi.getUserName()); + if (ugi.getRealUser() != null) { + userInfoPB.setRealUser(ugi.getRealUser().getUserName()); + } + } + return userInfoPB.build(); + } + + /** Update lastActivity with the current time. */ + protected void touch() { + lastActivity.set(System.currentTimeMillis()); + } + + /** + * Add a call to this connection's call queue and notify + * a listener; synchronized. If the connection is dead, the call is not added, and the + * caller is notified. + * This function can return a connection that is already marked as 'shouldCloseConnection' + * It is up to the user code to check this status. + * @param call to add + */ + protected synchronized void addCall(Call call) { + // If the connection is about to close, we manage this as if the call was already added + // to the connection calls list. If not, the connection creations are serialized, as + // mentioned in HBASE-6364 + if (this.shouldCloseConnection.get()) { + if (this.closeException == null) { + call.setException(new IOException( + "Call " + call.id + " not added as the connection " + remoteId + " is closing")); + } else { + call.setException(this.closeException); + } + synchronized (call) { + call.notifyAll(); + } + } else { + calls.put(call.id, call); + synchronized (call) { + notify(); + } + } + } + + /** This class sends a ping to the remote side when timeout on + * reading. If no failure is detected, it retries until at least + * a byte is read. 
+ */ + protected class PingInputStream extends FilterInputStream { + /* constructor */ + protected PingInputStream(InputStream in) { + super(in); + } + + /* Process timeout exception + * if the connection is not going to be closed, send a ping. + * otherwise, throw the timeout exception. + */ + private void handleTimeout(SocketTimeoutException e) throws IOException { + if (shouldCloseConnection.get() || !running.get() || remoteId.rpcTimeout > 0) { + throw e; + } + sendPing(); + } + + /** Read a byte from the stream. + * Send a ping if timeout on read. Retries if no failure is detected + * until a byte is read. + * @throws IOException for any IO problem other than socket timeout + */ + @Override + public int read() throws IOException { + do { + try { + return super.read(); + } catch (SocketTimeoutException e) { + handleTimeout(e); + } + } while (true); + } + + /** Read bytes into a buffer starting from offset off + * Send a ping if timeout on read. Retries if no failure is detected + * until a byte is read. + * + * @return the total number of bytes read; -1 if the connection is closed. + */ + @Override + public int read(byte[] buf, int off, int len) throws IOException { + do { + try { + return super.read(buf, off, len); + } catch (SocketTimeoutException e) { + handleTimeout(e); + } + } while (true); + } + } + + protected synchronized void setupConnection() throws IOException { + short ioFailures = 0; + short timeoutFailures = 0; + while (true) { + try { + this.socket = socketFactory.createSocket(); + this.socket.setTcpNoDelay(tcpNoDelay); + this.socket.setKeepAlive(tcpKeepAlive); + // connection time out is 20s + NetUtils.connect(this.socket, remoteId.getAddress(), + getSocketTimeout(conf)); + if (remoteId.rpcTimeout > 0) { + pingInterval = remoteId.rpcTimeout; // overwrite pingInterval + } + this.socket.setSoTimeout(pingInterval); + return; + } catch (SocketTimeoutException toe) { + /* The max number of retries is 45, + * which amounts to 20s*45 = 15 minutes retries. + */ + handleConnectionFailure(timeoutFailures++, maxRetries, toe); + } catch (IOException ie) { + handleConnectionFailure(ioFailures++, maxRetries, ie); + } + } + } + + protected void closeConnection() { + // close the current connection + if (socket != null) { + try { + socket.close(); + } catch (IOException e) { + LOG.warn("Not able to close a socket", e); + } + } + // set socket to null so that the next call to setupIOstreams + // can start the process of connect all over again. + socket = null; + } + + /** + * Handle connection failures + * + * If the current number of retries is equal to the max number of retries, + * stop retrying and throw the exception; Otherwise backoff N seconds and + * try connecting again. + * + * This Method is only called from inside setupIOstreams(), which is + * synchronized. Hence the sleep is synchronized; the locks will be retained. 
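+ * Each backoff sleeps for failureSleep ms (hbase.client.pause, default 1000).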
+ * + * @param curRetries current number of retries + * @param maxRetries max number of retries allowed + * @param ioe failure reason + * @throws IOException if max number of retries is reached + */ + private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe) + throws IOException { + + closeConnection(); + + // throw the exception if the maximum number of retries is reached + if (curRetries >= maxRetries) { + throw ioe; + } + + // otherwise back off and retry + try { + Thread.sleep(failureSleep); + } catch (InterruptedException ignored) {} + + LOG.info("Retrying connect to server: " + remoteId.getAddress() + + " after sleeping " + failureSleep + "ms. Already tried " + curRetries + + " time(s)."); + } + + /* wait till someone signals us to start reading RPC response or + * it is idle too long, it is marked as to be closed, + * or the client is marked as not running. + * + * Return true if it is time to read a response; false otherwise. + */ + protected synchronized boolean waitForWork() { + if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { + long timeout = maxIdleTime - (System.currentTimeMillis()-lastActivity.get()); + if (timeout>0) { + try { + wait(timeout); + } catch (InterruptedException ignored) {} + } + } + + if (!calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { + return true; + } else if (shouldCloseConnection.get()) { + return false; + } else if (calls.isEmpty()) { // idle connection closed or stopped + markClosed(null); + return false; + } else { // get stopped but there are still pending requests + markClosed((IOException)new IOException().initCause( + new InterruptedException())); + return false; + } + } + + public InetSocketAddress getRemoteAddress() { + return remoteId.getAddress(); + } + + /* Send a ping to the server if the time elapsed + * since last I/O activity is equal to or greater than the ping interval + */ + protected synchronized void sendPing() throws IOException { + // Can we do tcp keepalive instead of this pinging? 
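+ // A ping is just the sentinel PING_CALL_ID (-1) written as a bare int with no body;
+ // the server recognizes the id and (presumably) discards it rather than dispatching it.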
+ long curTime = System.currentTimeMillis(); + if ( curTime - lastActivity.get() >= pingInterval) { + lastActivity.set(curTime); + //noinspection SynchronizeOnNonFinalField + synchronized (this.out) { + out.writeInt(PING_CALL_ID); + out.flush(); + } + } + } + + @Override + public void run() { + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": starting, connections " + connections.size()); + } + + try { + while (waitForWork()) { // Wait here for work - read or close connection + readResponse(); + } + } catch (Throwable t) { + LOG.warn(getName() + ": unexpected exception receiving call responses", t); + markClosed(new IOException("Unexpected exception receiving call responses", t)); + } + + close(); + + if (LOG.isDebugEnabled()) + LOG.debug(getName() + ": stopped, connections " + connections.size()); + } + + private synchronized void disposeSasl() { + if (saslRpcClient != null) { + try { + saslRpcClient.dispose(); + saslRpcClient = null; + } catch (IOException ioe) { + LOG.error("Error disposing of SASL client", ioe); + } + } + } + + private synchronized boolean shouldAuthenticateOverKrb() throws IOException { + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + UserGroupInformation currentUser = + UserGroupInformation.getCurrentUser(); + UserGroupInformation realUser = currentUser.getRealUser(); + return authMethod == AuthMethod.KERBEROS && + loginUser != null && + //Make sure user logged in using Kerberos either keytab or TGT + loginUser.hasKerberosCredentials() && + // relogin only in case it is the login user (e.g. JT) + // or superuser (like oozie). + (loginUser.equals(currentUser) || loginUser.equals(realUser)); + } + + private synchronized boolean setupSaslConnection(final InputStream in2, + final OutputStream out2) throws IOException { + saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal); + return saslRpcClient.saslConnect(in2, out2); + } + + /** + * If multiple clients with the same principal try to connect + * to the same server at the same time, the server assumes a + * replay attack is in progress. This is a feature of kerberos. + * In order to work around this, what is done is that the client + * backs off randomly and tries to initiate the connection + * again. + * The other problem is to do with ticket expiry. To handle that, + * a relogin is attempted. + *
<p>
+ * The retry logic is governed by the {@link #shouldAuthenticateOverKrb}
+ * method. When the user doesn't have valid credentials, we don't need to
+ * retry (from cache or ticket). In such cases, it is prudent to throw a
+ * runtime exception when we receive a SaslException from the underlying
+ * authentication implementation, so there is no retry at higher levels
+ * (e.g., HCM or HBaseAdmin).
+ *
</p>
+ */ + private synchronized void handleSaslConnectionFailure( + final int currRetries, + final int maxRetries, final Exception ex, final Random rand, + final UserGroupInformation user) + throws IOException, InterruptedException{ + user.doAs(new PrivilegedExceptionAction() { + public Object run() throws IOException, InterruptedException { + closeConnection(); + if (shouldAuthenticateOverKrb()) { + if (currRetries < maxRetries) { + LOG.debug("Exception encountered while connecting to " + + "the server : " + ex); + //try re-login + if (UserGroupInformation.isLoginKeytabBased()) { + UserGroupInformation.getLoginUser().reloginFromKeytab(); + } else { + UserGroupInformation.getLoginUser().reloginFromTicketCache(); + } + disposeSasl(); + //have granularity of milliseconds + //we are sleeping with the Connection lock held but since this + //connection instance is being used for connecting to the server + //in question, it is okay + Thread.sleep((rand.nextInt(reloginMaxBackoff) + 1)); + return null; + } else { + String msg = "Couldn't setup connection for " + + UserGroupInformation.getLoginUser().getUserName() + + " to " + serverPrincipal; + LOG.warn(msg); + throw (IOException) new IOException(msg).initCause(ex); + } + } else { + LOG.warn("Exception encountered while connecting to " + + "the server : " + ex); + } + if (ex instanceof RemoteException) { + throw (RemoteException)ex; + } + if (ex instanceof SaslException) { + String msg = "SASL authentication failed." + + " The most likely cause is missing or invalid credentials." + + " Consider 'kinit'."; + LOG.fatal(msg, ex); + throw new RuntimeException(msg, ex); + } + throw new IOException(ex); + } + }); + } + + protected synchronized void setupIOstreams() + throws IOException, InterruptedException { + if (socket != null || shouldCloseConnection.get()) { + return; + } + + if (failedServers.isFailedServer(remoteId.getAddress())) { + if (LOG.isDebugEnabled()) { + LOG.debug("Not trying to connect to " + server + + " this server is in the failed servers list"); + } + IOException e = new FailedServerException( + "This server is in the failed servers list: " + server); + markClosed(e); + close(); + throw e; + } + + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to " + server); + } + short numRetries = 0; + final short MAX_RETRIES = 5; + Random rand = null; + while (true) { + setupConnection(); + InputStream inStream = NetUtils.getInputStream(socket); + OutputStream outStream = NetUtils.getOutputStream(socket); + // Write out the preamble -- MAGIC, version, and auth to use. + writeConnectionHeaderPreamble(outStream); + if (useSasl) { + final InputStream in2 = inStream; + final OutputStream out2 = outStream; + UserGroupInformation ticket = remoteId.getTicket().getUGI(); + if (authMethod == AuthMethod.KERBEROS) { + if (ticket != null && ticket.getRealUser() != null) { + ticket = ticket.getRealUser(); + } + } + boolean continueSasl = false; + if (ticket == null) throw new FatalConnectionException("ticket/user is null"); + try { + continueSasl = ticket.doAs(new PrivilegedExceptionAction() { + @Override + public Boolean run() throws IOException { + return setupSaslConnection(in2, out2); + } + }); + } catch (Exception ex) { + if (rand == null) { + rand = new Random(); + } + handleSaslConnectionFailure(numRetries++, MAX_RETRIES, ex, rand, ticket); + continue; + } + if (continueSasl) { + // Sasl connect is successful. Let's set up Sasl i/o streams. 
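+ // From here on, traffic flows through the SASL wrappers, which apply whatever
+ // integrity/confidentiality protection was negotiated during the handshake.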
+ inStream = saslRpcClient.getInputStream(inStream); + outStream = saslRpcClient.getOutputStream(outStream); + } else { + // fall back to simple auth because server told us so. + authMethod = AuthMethod.SIMPLE; + useSasl = false; + } + } + this.in = new DataInputStream(new BufferedInputStream(new PingInputStream(inStream))); + this.out = new DataOutputStream(new BufferedOutputStream(outStream)); + // Now write out the connection header + writeConnectionHeader(); + + // update last activity time + touch(); + + // start the receiver thread after the socket connection has been set up + start(); + return; + } + } catch (Throwable t) { + failedServers.addToFailedServers(remoteId.address); + IOException e = null; + if (t instanceof IOException) { + e = (IOException)t; + markClosed(e); + } else { + e = new IOException("Coundn't set up IO Streams", t); + markClosed(e); + } + close(); + throw e; + } + } + + /** + * Write the RPC header: + */ + private void writeConnectionHeaderPreamble(OutputStream outStream) throws IOException { + // Assemble the preamble up in a buffer first and then send it. Writing individual elements, + // they are getting sent across piecemeal according to wireshark and then server is messing + // up the reading on occasion (the passed in stream is not buffered yet). + + // Preamble is six bytes -- 'HBas' + VERSION + AUTH_CODE + int rpcHeaderLen = HConstants.RPC_HEADER.array().length; + byte [] preamble = new byte [rpcHeaderLen + 2]; + System.arraycopy(HConstants.RPC_HEADER.array(), 0, preamble, 0, rpcHeaderLen); + preamble[rpcHeaderLen] = HConstants.RPC_CURRENT_VERSION; + preamble[rpcHeaderLen + 1] = authMethod.code; + outStream.write(preamble); + outStream.flush(); + } + + /** + * Write the connection header. + * Out is not synchronized because only the first thread does this. + */ + private void writeConnectionHeader() throws IOException { + synchronized (this.out) { + this.out.writeInt(this.header.getSerializedSize()); + this.header.writeTo(this.out); + this.out.flush(); + } + } + + /** Close the connection. */ + protected synchronized void close() { + if (!shouldCloseConnection.get()) { + LOG.error(getName() + ": the connection is not in the closed state"); + return; + } + + // release the resources + // first thing to do;take the connection out of the connection list + synchronized (connections) { + if (connections.get(remoteId) == this) { + connections.remove(remoteId); + } + } + + // close the streams and therefore the socket + IOUtils.closeStream(out); + this.out = null; + IOUtils.closeStream(in); + this.in = null; + disposeSasl(); + + // clean up all calls + if (closeException == null) { + if (!calls.isEmpty()) { + LOG.warn(getName() + ": connection is closed for no cause and calls are not empty. " + + "#Calls: " + calls.size()); + + // clean up calls anyway + closeException = new IOException("Unexpected closed connection"); + cleanupCalls(); + } + } else { + // log the info + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": closing ipc connection to " + server + ": " + + closeException.getMessage(), closeException); + } + + // cleanup calls + cleanupCalls(); + } + if (LOG.isDebugEnabled()) + LOG.debug(getName() + ": closed"); + } + + /** + * Initiates a call by sending the parameter to the remote server. + * Note: this is not called from the Connection thread, but by other + * threads. 
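+ * <p>Framing (as far as can be read from {@link IPCUtil#write}): the request header
+ * and the optional param are written protobuf-delimited, followed by the raw cell
+ * block bytes when one is present.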
+ * @param call + * @see #readResponse() + */ + protected void writeRequest(Call call) { + if (shouldCloseConnection.get()) return; + try { + RequestHeader.Builder builder = RequestHeader.newBuilder(); + builder.setCallId(call.id); + if (Trace.isTracing()) { + Span s = Trace.currentTrace(); + builder.setTraceInfo(RPCTInfo.newBuilder(). + setParentId(s.getSpanId()).setTraceId(s.getTraceId())); + } + builder.setMethodName(call.md.getName()); + builder.setRequestParam(call.param != null); + ByteBuffer cellBlock = ipcUtil.buildCellBlock(this.codec, this.compressor, call.cells); + if (cellBlock != null) { + CellBlockMeta.Builder cellBlockBuilder = CellBlockMeta.newBuilder(); + cellBlockBuilder.setLength(cellBlock.limit()); + builder.setCellBlockMeta(cellBlockBuilder.build()); + } + //noinspection SynchronizeOnNonFinalField + RequestHeader header = builder.build(); + synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC + IPCUtil.write(this.out, header, call.param, cellBlock); + } + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": wrote request header " + TextFormat.shortDebugString(header)); + } + } catch(IOException e) { + markClosed(e); + } + } + + /* Receive a response. + * Because only one receiver, so no synchronization on in. + */ + protected void readResponse() { + if (shouldCloseConnection.get()) return; + touch(); + int totalSize = -1; + try { + // See HBaseServer.Call.setResponse for where we write out the response. + // Total size of the response. Unused. But have to read it in anyways. + totalSize = in.readInt(); + + // Read the header + ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); + int id = responseHeader.getCallId(); + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": got response header " + + TextFormat.shortDebugString(responseHeader) + ", totalSize: " + totalSize + " bytes"); + } + Call call = calls.get(id); + if (call == null) { + // So we got a response for which we have no corresponding 'call' here on the client-side. + // We probably timed out waiting, cleaned up all references, and now the server decides + // to return a response. There is nothing we can do w/ the response at this stage. Clean + // out the wire of the response so its out of the way and we can get other responses on + // this connection. 
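+ // totalSize counts everything after the length prefix, so subtracting what the
+ // delimited header already consumed yields the orphaned bytes left to skip.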
+ int readSoFar = IPCUtil.getTotalSizeWhenWrittenDelimited(responseHeader); + int whatIsLeftToRead = totalSize - readSoFar; + LOG.debug("Unknown callId: " + id + ", skipping over this response of " + + whatIsLeftToRead + " bytes"); + IOUtils.skipFully(in, whatIsLeftToRead); + } + if (responseHeader.hasException()) { + ExceptionResponse exceptionResponse = responseHeader.getException(); + RemoteException re = createRemoteException(exceptionResponse); + if (isFatalConnectionException(exceptionResponse)) { + markClosed(re); + } else { + if (call != null) call.setException(re); + } + } else { + Message value = null; + // Call may be null because it may have timedout and been cleaned up on this side already + if (call != null && call.responseDefaultType != null) { + Builder builder = call.responseDefaultType.newBuilderForType(); + builder.mergeDelimitedFrom(in); + value = builder.build(); + } + CellScanner cellBlockScanner = null; + if (responseHeader.hasCellBlockMeta()) { + int size = responseHeader.getCellBlockMeta().getLength(); + byte [] cellBlock = new byte[size]; + IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length); + cellBlockScanner = ipcUtil.createCellScanner(this.codec, this.compressor, cellBlock); + } + // it's possible that this call may have been cleaned up due to a RPC + // timeout, so check if it still exists before setting the value. + if (call != null) call.setResponse(value, cellBlockScanner); + } + if (call != null) calls.remove(id); + } catch (IOException e) { + if (e instanceof SocketTimeoutException && remoteId.rpcTimeout > 0) { + // Clean up open calls but don't treat this as a fatal condition, + // since we expect certain responses to not make it by the specified + // {@link ConnectionId#rpcTimeout}. + closeException = e; + } else { + // Treat this as a fatal condition and close this connection + markClosed(e); + } + } finally { + if (remoteId.rpcTimeout > 0) { + cleanupCalls(remoteId.rpcTimeout); + } + } + } + + /** + * @param e + * @return True if the exception is a fatal connection exception. + */ + private boolean isFatalConnectionException(final ExceptionResponse e) { + return e.getExceptionClassName(). + equals(FatalConnectionException.class.getName()); + } + + /** + * @param e + * @return RemoteException made from passed e + */ + private RemoteException createRemoteException(final ExceptionResponse e) { + String innerExceptionClassName = e.getExceptionClassName(); + boolean doNotRetry = e.getDoNotRetry(); + return e.hasHostname()? 
+ // If a hostname then add it to the RemoteWithExtrasException + new RemoteWithExtrasException(innerExceptionClassName, + e.getStackTrace(), e.getHostname(), e.getPort(), doNotRetry): + new RemoteWithExtrasException(innerExceptionClassName, + e.getStackTrace(), doNotRetry); + } + + protected synchronized void markClosed(IOException e) { + if (shouldCloseConnection.compareAndSet(false, true)) { + closeException = e; + notifyAll(); + } + } + + /* Cleanup all calls and mark them as done */ + protected void cleanupCalls() { + cleanupCalls(0); + } + + protected void cleanupCalls(long rpcTimeout) { + Iterator> itor = calls.entrySet().iterator(); + while (itor.hasNext()) { + Call c = itor.next().getValue(); + long waitTime = System.currentTimeMillis() - c.getStartTime(); + if (waitTime >= rpcTimeout) { + if (this.closeException == null) { + // There may be no exception in the case that there are many calls + // being multiplexed over this connection and these are succeeding + // fine while this Call object is taking a long time to finish + // over on the server; e.g. I just asked the regionserver to bulk + // open 3k regions or its a big fat multiput into a heavily-loaded + // server (Perhaps this only happens at the extremes?) + this.closeException = new CallTimeoutException("Call id=" + c.id + + ", waitTime=" + waitTime + ", rpcTimetout=" + rpcTimeout); + } + c.setException(this.closeException); + synchronized (c) { + c.notifyAll(); + } + itor.remove(); + } else { + break; + } + } + try { + if (!calls.isEmpty()) { + Call firstCall = calls.get(calls.firstKey()); + long maxWaitTime = System.currentTimeMillis() - firstCall.getStartTime(); + if (maxWaitTime < rpcTimeout) { + rpcTimeout -= maxWaitTime; + } + } + if (!shouldCloseConnection.get()) { + closeException = null; + if (socket != null) { + socket.setSoTimeout((int) rpcTimeout); + } + } + } catch (SocketException e) { + LOG.debug("Couldn't lower timeout, which may result in longer than expected calls"); + } + } + } + + /** + * Client-side call timeout + */ + @SuppressWarnings("serial") + public static class CallTimeoutException extends IOException { + public CallTimeoutException(final String msg) { + super(msg); + } + } + + /** + * Construct an IPC cluster client whose values are of the {@link Message} class. + * @param conf configuration + * @param factory socket factory + */ + RpcClient(Configuration conf, String clusterId, SocketFactory factory) { + this.maxIdleTime = conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s + this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); + this.failureSleep = conf.getInt("hbase.client.pause", 1000); + this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true); + this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true); + this.pingInterval = getPingInterval(conf); + this.ipcUtil = new IPCUtil(conf); + this.conf = conf; + this.codec = getCodec(conf); + this.compressor = getCompressor(conf); + this.socketFactory = factory; + this.clusterId = clusterId != null ? 
clusterId : HConstants.CLUSTER_ID_DEFAULT; + this.connections = new PoolMap(getPoolType(conf), getPoolSize(conf)); + this.failedServers = new FailedServers(conf); + if (LOG.isDebugEnabled()) { + LOG.debug("Codec=" + this.codec + ", compressor=" + this.compressor + + ", tcpKeepAlive=" + this.tcpKeepAlive + + ", tcpNoDelay=" + this.tcpNoDelay + + ", maxIdleTime=" + this.maxIdleTime + + ", maxRetries=" + this.maxRetries + + ", ping interval=" + this.pingInterval + "ms."); + } + } + + /** + * Construct an IPC client for the cluster clusterId with the default SocketFactory + * @param conf configuration + * @param clusterId + */ + public RpcClient(Configuration conf, String clusterId) { + this(conf, clusterId, NetUtils.getDefaultSocketFactory(conf)); + } + + /** + * Encapsulate the ugly casting and RuntimeException conversion in private method. + * @param conf + * @return Codec to use on this client. + */ + private static Codec getCodec(final Configuration conf) { + String className = conf.get("hbase.client.rpc.codec", KeyValueCodec.class.getCanonicalName()); + try { + return (Codec)Class.forName(className).newInstance(); + } catch (Exception e) { + throw new RuntimeException("Failed getting codec " + className, e); + } + } + + /** + * Encapsulate the ugly casting and RuntimeException conversion in private method. + * @param conf + * @return The compressor to use on this client. + */ + private static CompressionCodec getCompressor(final Configuration conf) { + String className = conf.get("hbase.client.rpc.compressor", null); + if (className == null || className.isEmpty()) return null; + try { + return (CompressionCodec)Class.forName(className).newInstance(); + } catch (Exception e) { + throw new RuntimeException("Failed getting compressor " + className, e); + } + } + + /** + * Return the pool type specified in the configuration, which must be set to + * either {@link PoolType#RoundRobin} or {@link PoolType#ThreadLocal}, + * otherwise default to the former. + * + * For applications with many user threads, use a small round-robin pool. For + * applications with few user threads, you may want to try using a + * thread-local pool. In any case, the number of {@link RpcClient} instances + * should not exceed the operating system's hard limit on the number of + * connections. + * + * @param config configuration + * @return either a {@link PoolType#RoundRobin} or + * {@link PoolType#ThreadLocal} + */ + protected static PoolType getPoolType(Configuration config) { + return PoolType.valueOf(config.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE), + PoolType.RoundRobin, PoolType.ThreadLocal); + } + + /** + * Return the pool size specified in the configuration, which is applicable only if + * the pool type is {@link PoolType#RoundRobin}. + * + * @param config + * @return the maximum pool size + */ + protected static int getPoolSize(Configuration config) { + return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); + } + + /** Return the socket factory of this client + * + * @return this client's socket factory + */ + SocketFactory getSocketFactory() { + return socketFactory; + } + + /** Stop all threads related to this client. No further calls may be made + * using this client. 
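+ * Interrupts every pooled connection, then blocks until the connection pool
+ * has drained.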
*/ + public void stop() { + if (LOG.isDebugEnabled()) LOG.debug("Stopping rpc client"); + if (!running.compareAndSet(true, false)) return; + + // wake up all connections + synchronized (connections) { + for (Connection conn : connections.values()) { + conn.interrupt(); + } + } + + // wait until all connections are closed + while (!connections.isEmpty()) { + try { + Thread.sleep(100); + } catch (InterruptedException ignored) { + } + } + } + + /** Make a call, passing param, to the IPC server running at + * address which is servicing the protocol protocol, + * with the ticket credentials, returning the value. + * Throws exceptions if there are network problems or if the remote code + * threw an exception. + * @param md + * @param param + * @param cells + * @param addr + * @param returnType + * @param ticket Be careful which ticket you pass. A new user will mean a new Connection. + * {@link User#getCurrent()} makes a new instance of User each time so will be a new Connection + * each time. + * @param rpcTimeout + * @return A pair with the Message response and the Cell data (if any). + * @throws InterruptedException + * @throws IOException + */ + Pair call(MethodDescriptor md, Message param, CellScanner cells, + Message returnType, User ticket, InetSocketAddress addr, + int rpcTimeout) + throws InterruptedException, IOException { + Call call = new Call(md, param, cells, returnType); + Connection connection = + getConnection(ticket, call, addr, rpcTimeout, this.codec, this.compressor); + connection.writeRequest(call); // send the parameter + boolean interrupted = false; + //noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (call) { + while (!call.done) { + try { + call.wait(); // wait for the result + } catch (InterruptedException ignored) { + // save the fact that we were interrupted + interrupted = true; + } + } + + if (interrupted) { + // set the interrupt flag now that we are done waiting + Thread.currentThread().interrupt(); + } + + if (call.error != null) { + if (call.error instanceof RemoteException) { + call.error.fillInStackTrace(); + throw call.error; + } + // local exception + throw wrapException(addr, call.error); + } + return new Pair(call.response, call.cells); + } + } + + /** + * Take an IOException and the address we were trying to connect to + * and return an IOException with the input exception as the cause. + * The new exception provides the stack trace of the place where + * the exception is thrown and some extra diagnostics information. + * If the exception is ConnectException or SocketTimeoutException, + * return a new one of the same type; Otherwise return an IOException. + * + * @param addr target address + * @param exception the relevant exception + * @return an exception to throw + */ + protected IOException wrapException(InetSocketAddress addr, + IOException exception) { + if (exception instanceof ConnectException) { + //connection refused; include the host:port in the error + return (ConnectException)new ConnectException( + "Call to " + addr + " failed on connection exception: " + exception).initCause(exception); + } else if (exception instanceof SocketTimeoutException) { + return (SocketTimeoutException)new SocketTimeoutException("Call to " + addr + + " failed because " + exception).initCause(exception); + } else { + return (IOException)new IOException("Call to " + addr + " failed on local exception: " + + exception).initCause(exception); + } + } + + /** + * Interrupt the connections to the given ip:port server. 
This should be called if the server + * is known as actually dead. This will not prevent current operation to be retried, and, + * depending on their own behavior, they may retry on the same server. This can be a feature, + * for example at startup. In any case, they're likely to get connection refused (if the + * process died) or no route to host: i.e. there next retries should be faster and with a + * safe exception. + */ + public void cancelConnections(String hostname, int port, IOException ioe) { + synchronized (connections) { + for (Connection connection : connections.values()) { + if (connection.isAlive() && + connection.getRemoteAddress().getPort() == port && + connection.getRemoteAddress().getHostName().equals(hostname)) { + LOG.info("The server on " + hostname + ":" + port + + " is dead - stopping the connection " + connection.remoteId); + connection.closeConnection(); + // We could do a connection.interrupt(), but it's safer not to do it, as the + // interrupted exception behavior is not defined nor enforced enough. + } + } + } + } + + /* Get a connection from the pool, or create a new one and add it to the + * pool. Connections to a given host/port are reused. */ + protected Connection getConnection(User ticket, Call call, InetSocketAddress addr, + int rpcTimeout, final Codec codec, final CompressionCodec compressor) + throws IOException, InterruptedException { + if (!running.get()) throw new StoppedRpcClientException(); + Connection connection; + ConnectionId remoteId = + new ConnectionId(ticket, call.md.getService().getName(), addr, rpcTimeout); + synchronized (connections) { + connection = connections.get(remoteId); + if (connection == null) { + connection = createConnection(remoteId, this.codec, this.compressor); + connections.put(remoteId, connection); + } + } + connection.addCall(call); + + //we don't invoke the method below inside "synchronized (connections)" + //block above. The reason for that is if the server happens to be slow, + //it will take longer to establish a connection and that will slow the + //entire system down. + //Moreover, if the connection is currently created, there will be many threads + // waiting here; as setupIOstreams is synchronized. If the connection fails with a + // timeout, they will all fail simultaneously. This is checked in setupIOstreams. + connection.setupIOstreams(); + return connection; + } + + /** + * This class holds the address and the user ticket, etc. 
The client connections + * to servers are uniquely identified by + */ + protected static class ConnectionId { + final InetSocketAddress address; + final User ticket; + final int rpcTimeout; + private static final int PRIME = 16777619; + final String serviceName; + + ConnectionId(User ticket, + String serviceName, + InetSocketAddress address, + int rpcTimeout) { + this.address = address; + this.ticket = ticket; + this.rpcTimeout = rpcTimeout; + this.serviceName = serviceName; + } + + String getServiceName() { + return this.serviceName; + } + + InetSocketAddress getAddress() { + return address; + } + + User getTicket() { + return ticket; + } + + @Override + public String toString() { + return this.address.toString() + "/" + this.serviceName + "/" + this.ticket + "/" + + this.rpcTimeout; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof ConnectionId) { + ConnectionId id = (ConnectionId) obj; + return address.equals(id.address) && + ((ticket != null && ticket.equals(id.ticket)) || + (ticket == id.ticket)) && rpcTimeout == id.rpcTimeout && + this.serviceName == id.serviceName; + } + return false; + } + + @Override // simply use the default Object#hashcode() ? + public int hashCode() { + int hashcode = (address.hashCode() + + PRIME * (PRIME * this.serviceName.hashCode() ^ + (ticket == null ? 0 : ticket.hashCode()) )) ^ + rpcTimeout; + return hashcode; + } + } + + public static void setRpcTimeout(int t) { + rpcTimeout.set(t); + } + + public static int getRpcTimeout() { + return rpcTimeout.get(); + } + + public static void resetRpcTimeout() { + rpcTimeout.remove(); + } + + /** Make a blocking call. + * Throws exceptions if there are network problems or if the remote code + * threw an exception. + * @param md + * @param controller + * @param param + * @param returnType + * @param isa + * @param ticket Be careful which ticket you pass. A new user will mean a new Connection. + * {@link User#getCurrent()} makes a new instance of User each time so will be a new Connection + * each time. + * @param rpcTimeout + * @return A pair with the Message response and the Cell data (if any). + * @throws InterruptedException + * @throws IOException + */ + Message callBlockingMethod(MethodDescriptor md, RpcController controller, + Message param, Message returnType, final User ticket, final InetSocketAddress isa, + final int rpcTimeout) + throws ServiceException { + long startTime = 0; + if (LOG.isTraceEnabled()) { + startTime = System.currentTimeMillis(); + } + PayloadCarryingRpcController pcrc = (PayloadCarryingRpcController)controller; + CellScanner cells = null; + if (pcrc != null) { + cells = pcrc.cellScanner(); + // Clear it here so we don't by mistake try and these cells processing results. + pcrc.setCellScanner(null); + } + Pair val = null; + try { + val = call(md, param, cells, returnType, ticket, isa, rpcTimeout); + if (pcrc != null) { + // Shove the results into controller so can be carried across the proxy/pb service void. + if (val.getSecond() != null) pcrc.setCellScanner(val.getSecond()); + } else if (val.getSecond() != null) { + throw new ServiceException("Client dropping data on the floor!"); + } + + if (LOG.isTraceEnabled()) { + long callTime = System.currentTimeMillis() - startTime; + if (LOG.isTraceEnabled()) { + LOG.trace("Call: " + md.getName() + ", callTime: " + callTime + "ms"); + } + } + return val.getFirst(); + } catch (Throwable e) { + throw new ServiceException(e); + } + } + + /** + * Creates a "channel" that can be used by a blocking protobuf service. 
Useful setting up + * protobuf blocking stubs. + * @param sn + * @param ticket + * @param rpcTimeout + * @return A blocking rpc channel that goes via this rpc client instance. + */ + public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, + final User ticket, final int rpcTimeout) { + return new BlockingRpcChannelImplementation(this, sn, ticket, rpcTimeout); + } + + /** + * Blocking rpc channel that goes via hbase rpc. + */ + // Public so can be subclassed for tests. + public static class BlockingRpcChannelImplementation implements BlockingRpcChannel { + private final InetSocketAddress isa; + private volatile RpcClient rpcClient; + private final int rpcTimeout; + private final User ticket; + + protected BlockingRpcChannelImplementation(final RpcClient rpcClient, final ServerName sn, + final User ticket, final int rpcTimeout) { + this.isa = new InetSocketAddress(sn.getHostname(), sn.getPort()); + this.rpcClient = rpcClient; + this.rpcTimeout = rpcTimeout; + this.ticket = ticket; + } + + @Override + public Message callBlockingMethod(MethodDescriptor md, RpcController controller, + Message param, Message returnType) + throws ServiceException { + return this.rpcClient.callBlockingMethod(md, controller, param, returnType, this.ticket, + this.isa, this.rpcTimeout); + } + } +} \ No newline at end of file Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java?rev=1478639&view=auto ============================================================================== --- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java (added) +++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java Fri May 3 03:58:33 2013 @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 

Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java (added)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java Fri May 3 03:58:33 2013
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import org.apache.hadoop.hbase.exceptions.HBaseIOException;
+
+public class StoppedRpcClientException extends HBaseIOException {
+  public StoppedRpcClientException() {
+    super();
+  }
+
+  public StoppedRpcClientException(String msg) {
+    super(msg);
+  }
+}
\ No newline at end of file

Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java (added)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java Fri May 3 03:58:33 2013
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+public class UnsupportedCellCodecException extends FatalConnectionException {
+  public UnsupportedCellCodecException() {
+    super();
+  }
+
+  public UnsupportedCellCodecException(String msg) {
+    super(msg);
+  }
+
+  public UnsupportedCellCodecException(String msg, Throwable t) {
+    super(msg, t);
+  }
+}
\ No newline at end of file

Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java (added)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java Fri May 3 03:58:33 2013
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+public class UnsupportedCompressionCodecException extends FatalConnectionException {
+  public UnsupportedCompressionCodecException() {
+    super();
+  }
+
+  public UnsupportedCompressionCodecException(String msg) {
+    super(msg);
+  }
+
+  public UnsupportedCompressionCodecException(String msg, Throwable t) {
+    super(msg, t);
+  }
+}
\ No newline at end of file

Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java (added)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java Fri May 3 03:58:33 2013
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+public class WrongVersionException extends FatalConnectionException {
+  public WrongVersionException() {
+    super();
+  }
+
+  public WrongVersionException(String msg) {
+    super(msg);
+  }
+
+  public WrongVersionException(String msg, Throwable t) {
+    super(msg, t);
+  }
+}
\ No newline at end of file

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java Fri May 3 03:58:33 2013
@@ -46,11 +46,8 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.MasterAdminProtocol;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -67,6 +64,7 @@ import org.apache.hadoop.hbase.filter.Fi
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -85,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
@@ -105,6 +104,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.security.access.Permission;
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.util.Dyna
 import org.apache.hadoop.hbase.util.Methods;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.common.collect.ArrayListMultimap;
@@ -222,6 +223,9 @@ public final class ProtobufUtil {
     if (e == null) {
       return new IOException(se);
     }
+    if (e instanceof RemoteException) {
+      e = ((RemoteException)e).unwrapRemoteException();
+    }
     return e instanceof IOException ? (IOException) e : new IOException(se);
   }
 
@@ -1206,7 +1210,7 @@ public final class ProtobufUtil {
    * @return the result of the Get
    * @throws IOException
    */
-  public static Result get(final ClientProtocol client,
+  public static Result get(final ClientService.BlockingInterface client,
       final byte[] regionName, final Get get) throws IOException {
     GetRequest request =
         RequestConverter.buildGetRequest(regionName, get);
@@ -1229,7 +1233,7 @@ public final class ProtobufUtil {
    * @return the row or the closestRowBefore if it doesn't exist
    * @throws IOException
    */
-  public static Result getRowOrBefore(final ClientProtocol client,
+  public static Result getRowOrBefore(final ClientService.BlockingInterface client,
       final byte[] regionName, final byte[] row,
       final byte[] family) throws IOException {
     GetRequest request =
@@ -1254,7 +1258,7 @@ public final class ProtobufUtil {
    * @return true if all are loaded
    * @throws IOException
    */
-  public static boolean bulkLoadHFile(final ClientProtocol client,
+  public static boolean bulkLoadHFile(final ClientService.BlockingInterface client,
      final List<Pair<byte[], String>> familyPaths,
      final byte[] regionName, boolean assignSeqNum) throws IOException {
    BulkLoadHFileRequest request =
@@ -1268,7 +1272,7 @@ public final class ProtobufUtil {
     }
   }
 
-  public static CoprocessorServiceResponse execService(final ClientProtocol client,
+  public static CoprocessorServiceResponse execService(final ClientService.BlockingInterface client,
     final CoprocessorServiceCall call, final byte[] regionName) throws IOException {
     CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder()
         .setCall(call).setRegion(
@@ -1282,8 +1286,9 @@ public final class ProtobufUtil {
     }
   }
 
-  public static CoprocessorServiceResponse execService(final MasterAdminProtocol client,
-    final CoprocessorServiceCall call) throws IOException {
+  public static CoprocessorServiceResponse execService(
+    final MasterAdminService.BlockingInterface client, final CoprocessorServiceCall call)
+  throws IOException {
     CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder()
         .setCall(call).setRegion(
            RequestConverter.buildRegionSpecifier(REGION_NAME, HConstants.EMPTY_BYTE_ARRAY)).build();
@@ -1315,7 +1320,7 @@ public final class ProtobufUtil {
    * @return the retrieved region info
    * @throws IOException
    */
-  public static HRegionInfo getRegionInfo(final AdminProtocol admin,
+  public static HRegionInfo getRegionInfo(final AdminService.BlockingInterface admin,
      final byte[] regionName) throws IOException {
     try {
       GetRegionInfoRequest request =
@@ -1337,7 +1342,7 @@ public final class ProtobufUtil {
    * @param transitionInZK
    * @throws IOException
    */
-  public static void closeRegion(final AdminProtocol admin,
+  public static void closeRegion(final AdminService.BlockingInterface admin,
       final byte[] regionName, final boolean transitionInZK) throws IOException {
     CloseRegionRequest closeRegionRequest =
       RequestConverter.buildCloseRegionRequest(regionName, transitionInZK);
@@ -1358,7 +1363,8 @@ public final class ProtobufUtil {
    * @return true if the region is closed
    * @throws IOException
    */
-  public static boolean closeRegion(final AdminProtocol admin, final byte[] regionName,
+  public static boolean closeRegion(final AdminService.BlockingInterface admin,
+      final byte[] regionName,
       final int versionOfClosingNode, final ServerName destinationServer,
       final boolean transitionInZK) throws IOException {
     CloseRegionRequest closeRegionRequest =
@@ -1379,7 +1385,7 @@ public final class ProtobufUtil {
    * @param region
    * @throws IOException
    */
-  public static void openRegion(final AdminProtocol admin,
+  public static void openRegion(final AdminService.BlockingInterface admin,
       final HRegionInfo region) throws IOException {
     OpenRegionRequest request =
       RequestConverter.buildOpenRegionRequest(region, -1);
@@ -1398,7 +1404,8 @@ public final class ProtobufUtil {
    * @return a list of online region info
    * @throws IOException
    */
-  public static List<HRegionInfo> getOnlineRegions(final AdminProtocol admin) throws IOException {
+  public static List<HRegionInfo> getOnlineRegions(final AdminService.BlockingInterface admin)
+  throws IOException {
     GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest();
     GetOnlineRegionResponse response = null;
     try {
@@ -1431,8 +1438,8 @@ public final class ProtobufUtil {
    * @return the server name
    * @throws IOException
    */
-  public static ServerInfo getServerInfo(
-      final AdminProtocol admin) throws IOException {
+  public static ServerInfo getServerInfo(final AdminService.BlockingInterface admin)
+  throws IOException {
     GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest();
     try {
       GetServerInfoResponse response = admin.getServerInfo(null, request);
@@ -1452,8 +1459,9 @@ public final class ProtobufUtil {
    * @return the list of store files
    * @throws IOException
    */
-  public static List<String> getStoreFiles(final AdminProtocol admin,
-      final byte[] regionName, final byte[] family) throws IOException {
+  public static List<String> getStoreFiles(final AdminService.BlockingInterface admin,
+      final byte[] regionName, final byte[] family)
+  throws IOException {
     GetStoreFileRequest request =
       RequestConverter.buildGetStoreFileRequest(regionName, family);
     try {
@@ -1472,7 +1480,7 @@ public final class ProtobufUtil {
    * @param splitPoint
    * @throws IOException
    */
-  public static void split(final AdminProtocol admin,
+  public static void split(final AdminService.BlockingInterface admin,
       final HRegionInfo hri, byte[] splitPoint) throws IOException {
     SplitRegionRequest request =
       RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint);
@@ -1493,7 +1501,7 @@ public final class ProtobufUtil {
    * two adjacent regions
    * @throws IOException
    */
-  public static void mergeRegions(final AdminProtocol admin,
+  public static void mergeRegions(final AdminService.BlockingInterface admin,
       final HRegionInfo region_a, final HRegionInfo region_b,
       final boolean forcible) throws IOException {
     MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest(

Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java (added)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java Fri May 3 03:58:33 2013
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security;
+
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Maps RPC protocol interfaces to required configuration
+ */
+public class SecurityInfo {
+  /** Maps RPC service names to authentication information */
+  private static ConcurrentMap<String,SecurityInfo> infos =
+      new ConcurrentHashMap<String,SecurityInfo>();
+  // populate info for known services
+  static {
+    infos.put(AdminProtos.AdminService.getDescriptor().getName(),
+        new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(ClientProtos.ClientService.getDescriptor().getName(),
+        new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(MasterAdminProtos.MasterAdminService.getDescriptor().getName(),
+        new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(MasterMonitorProtos.MasterMonitorService.getDescriptor().getName(),
+        new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(),
+        new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+  }
+
+  /**
+   * Adds a security configuration for a new service name. Note that this will have no effect if
+   * the service name was already registered.
+   */
+  public static void addInfo(String serviceName, SecurityInfo securityInfo) {
+    infos.putIfAbsent(serviceName, securityInfo);
+  }
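Since lookups key off the protobuf service name, a coprocessor endpoint that exposes its own service can register its security metadata the same way before any connection to that service is opened. A hedged sketch; MyCoprocessorProtos.MyService and the principal configuration key are hypothetical:

    // Hypothetical registration for a custom coprocessor service; a no-op if the
    // service name is already present in the registry.
    SecurityInfo.addInfo(MyCoprocessorProtos.MyService.getDescriptor().getName(),
        new SecurityInfo("my.coprocessor.kerberos.principal", Kind.HBASE_AUTH_TOKEN));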
+
+  /**
+   * Returns the security configuration associated with the given service name.
+   */
+  public static SecurityInfo getInfo(String serviceName) {
+    return infos.get(serviceName);
+  }
+
+  private final String serverPrincipal;
+  private final Kind tokenKind;
+
+  public SecurityInfo(String serverPrincipal, Kind tokenKind) {
+    this.serverPrincipal = serverPrincipal;
+    this.tokenKind = tokenKind;
+  }
+
+  public String getServerPrincipal() {
+    return serverPrincipal;
+  }
+
+  public Kind getTokenKind() {
+    return tokenKind;
+  }
+}

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java Fri May 3 03:58:33 2013
@@ -116,7 +116,8 @@ public class RecoverableZooKeeper {
       // the identifier = processID@hostName
       identifier = ManagementFactory.getRuntimeMXBean().getName();
     }
-    LOG.info("The identifier of this process is " + identifier);
+    LOG.info("Process identifier=" + identifier +
+      " connecting to ZooKeeper ensemble=" + quorumServers);
     this.identifier = identifier;
     this.id = Bytes.toBytes(identifier);
 
Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java Fri May 3 03:58:33 2013
@@ -56,8 +56,7 @@ public class ZKConfig {
    * @return Properties holding mappings representing ZooKeeper config file.
    */
   public static Properties makeZKProps(Configuration conf) {
-    if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG,
-        false)) {
+    if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, false)) {
       LOG.warn(
           "Parsing ZooKeeper's " + HConstants.ZOOKEEPER_CONFIG_NAME +
           " file for ZK properties " +
@@ -80,12 +79,9 @@ public class ZKConfig {
         }
       }
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Skipped reading ZK properties file '" +
-            HConstants.ZOOKEEPER_CONFIG_NAME +
-            "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG +
-            "' was not set to true");
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Skipped reading ZK properties file '" + HConstants.ZOOKEEPER_CONFIG_NAME +
+          "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + "' was not set to true");
       }
     }
 
Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Fri May 3 03:58:33 2013
@@ -116,8 +116,9 @@ public class ZKUtil {
     }
     int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
         HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
-    LOG.debug(identifier + " opening connection to ZooKeeper with ensemble (" +
-        ensemble + ")");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(identifier + " opening connection to ZooKeeper ensemble=" + ensemble);
+    }
     int retry = conf.getInt("zookeeper.recovery.retry", 3);
     int retryIntervalMillis =
         conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
@@ -419,9 +420,9 @@ public class ZKUtil {
       Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw);
       boolean exists = s != null ? true : false;
       if (exists) {
-        LOG.debug(zkw.prefix("Set watcher on existing znode " + znode));
+        LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode));
       } else {
-        LOG.debug(zkw.prefix(znode + " does not exist. Watcher is set."));
+        LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode));
       }
       return exists;
     } catch (KeeperException e) {