hbase-commits mailing list archives

From ecl...@apache.org
Subject [18/38] HBASE-12197 Move rest to its own module
Date Fri, 10 Oct 2014 16:53:21 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
deleted file mode 100644
index 45dd9ee..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.Map;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-import javax.xml.namespace.QName;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class SchemaResource extends ResourceBase {
-  private static final Log LOG = LogFactory.getLog(SchemaResource.class);
-
-  static CacheControl cacheControl;
-  static {
-    cacheControl = new CacheControl();
-    cacheControl.setNoCache(true);
-    cacheControl.setNoTransform(false);
-  }
-
-  TableResource tableResource;
-
-  /**
-   * Constructor
-   * @param tableResource
-   * @throws IOException
-   */
-  public SchemaResource(TableResource tableResource) throws IOException {
-    super();
-    this.tableResource = tableResource;
-  }
-
-  private HTableDescriptor getTableSchema() throws IOException,
-      TableNotFoundException {
-    Table table = servlet.getTable(tableResource.getName());
-    try {
-      return table.getTableDescriptor();
-    } finally {
-      table.close();
-    }
-  }
-
-  @GET
-  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
-    MIMETYPE_PROTOBUF_IETF})
-  public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    try {
-      ResponseBuilder response =
-        Response.ok(new TableSchemaModel(getTableSchema()));
-      response.cacheControl(cacheControl);
-      servlet.getMetrics().incrementSucessfulGetRequests(1);
-      return response.build();
-    } catch (Exception e) {
-      servlet.getMetrics().incrementFailedGetRequests(1);
-      return processException(e);
-    } 
-  }
-
-  private Response replace(final byte[] name, final TableSchemaModel model,
-      final UriInfo uriInfo, final HBaseAdmin admin) {
-    if (servlet.isReadOnly()) {
-      return Response.status(Response.Status.FORBIDDEN)
-        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
-        .build();
-    }
-    try {
-      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
-      for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
-        htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
-      }
-      for (ColumnSchemaModel family: model.getColumns()) {
-        HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
-        for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
-          hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
-        }
-        htd.addFamily(hcd);
-      }
-      if (admin.tableExists(name)) {
-        admin.disableTable(name);
-        admin.modifyTable(name, htd);
-        admin.enableTable(name);
-        servlet.getMetrics().incrementSucessfulPutRequests(1);
-      } else try {
-        admin.createTable(htd);
-        servlet.getMetrics().incrementSucessfulPutRequests(1);
-      } catch (TableExistsException e) {
-        // race, someone else created a table with the same name
-        return Response.status(Response.Status.NOT_MODIFIED)
-          .type(MIMETYPE_TEXT).entity("Not modified" + CRLF)
-          .build();
-      }
-      return Response.created(uriInfo.getAbsolutePath()).build();
-    } catch (Exception e) {
-      servlet.getMetrics().incrementFailedPutRequests(1);
-      return processException(e);
-    }
-  }
-
-  private Response update(final byte[] name, final TableSchemaModel model,
-      final UriInfo uriInfo, final HBaseAdmin admin) {
-    if (servlet.isReadOnly()) {
-      return Response.status(Response.Status.FORBIDDEN)
-        .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
-        .build();
-    }
-    try {
-      HTableDescriptor htd = admin.getTableDescriptor(name);
-      admin.disableTable(name);
-      try {
-        for (ColumnSchemaModel family: model.getColumns()) {
-          HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
-          for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
-            hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
-          }
-          if (htd.hasFamily(hcd.getName())) {
-            admin.modifyColumn(name, hcd);
-          } else {
-            admin.addColumn(name, hcd);
-          }
-        }
-      } catch (IOException e) {
-        return Response.status(Response.Status.SERVICE_UNAVAILABLE)
-          .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
-          .build();
-      } finally {
-        admin.enableTable(tableResource.getName());
-      }
-      servlet.getMetrics().incrementSucessfulPutRequests(1);
-      return Response.ok().build();
-    } catch (Exception e) {
-      servlet.getMetrics().incrementFailedPutRequests(1);
-      return processException(e);
-    }
-  }
-
-  private Response update(final TableSchemaModel model, final boolean replace,
-      final UriInfo uriInfo) {
-    try {
-      byte[] name = Bytes.toBytes(tableResource.getName());
-      HBaseAdmin admin = servlet.getAdmin();
-      if (replace || !admin.tableExists(name)) {
-        return replace(name, model, uriInfo, admin);
-      } else {
-        return update(name, model, uriInfo, admin);
-      }
-    } catch (Exception e) {
-      servlet.getMetrics().incrementFailedPutRequests(1);
-      return processException(e);
-    }
-  }
-
-  @PUT
-  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
-    MIMETYPE_PROTOBUF_IETF})
-  public Response put(final TableSchemaModel model, 
-      final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    return update(model, true, uriInfo);
-  }
-
-  @POST
-  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
-    MIMETYPE_PROTOBUF_IETF})
-  public Response post(final TableSchemaModel model, 
-      final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    return update(model, false, uriInfo);
-  }
-
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
-      justification="Expected")
-  @DELETE
-  public Response delete(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    if (servlet.isReadOnly()) {
-      return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
-          .entity("Forbidden" + CRLF).build();
-    }
-    try {
-      HBaseAdmin admin = servlet.getAdmin();
-      try {
-        admin.disableTable(tableResource.getName());
-      } catch (TableNotEnabledException e) { /* this is what we want anyway */ }
-      admin.deleteTable(tableResource.getName());
-      servlet.getMetrics().incrementSucessfulDeleteRequests(1);
-      return Response.ok().build();
-    } catch (Exception e) {
-      servlet.getMetrics().incrementFailedDeleteRequests(1);
-      return processException(e);
-    }
-  }
-}
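
For reference, the resource above serves a table's schema at /<table>/schema and maps PUT, POST and DELETE onto HBaseAdmin operations. Below is a minimal client-side sketch using the Client and Cluster classes moved later in this same patch; the gateway address localhost:8080 and the table name "mytable" are assumptions, and it assumes the companion Response class (elsewhere in this patch) exposes getCode() and getBody() as used here.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class SchemaGetExample {
  public static void main(String[] args) throws Exception {
    // Assumed gateway location; adjust to the REST server actually running.
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      // GET /mytable/schema with JSON content negotiation.
      Response response = client.get("/mytable/schema", "application/json");
      System.out.println("HTTP " + response.getCode());
      System.out.println(new String(response.getBody(), "UTF-8"));
    } finally {
      client.shutdown();
    }
  }
}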

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
deleted file mode 100644
index a7e52bd..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
-
-@InterfaceAudience.Private
-public class StorageClusterStatusResource extends ResourceBase {
-  private static final Log LOG =
-    LogFactory.getLog(StorageClusterStatusResource.class);
-
-  static CacheControl cacheControl;
-  static {
-    cacheControl = new CacheControl();
-    cacheControl.setNoCache(true);
-    cacheControl.setNoTransform(false);
-  }
-
-  /**
-   * Constructor
-   * @throws IOException
-   */
-  public StorageClusterStatusResource() throws IOException {
-    super();
-  }
-
-  @GET
-  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
-    MIMETYPE_PROTOBUF_IETF})
-  public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    try {
-      ClusterStatus status = servlet.getAdmin().getClusterStatus();
-      StorageClusterStatusModel model = new StorageClusterStatusModel();
-      model.setRegions(status.getRegionsCount());
-      model.setRequests(status.getRequestsCount());
-      model.setAverageLoad(status.getAverageLoad());
-      for (ServerName info: status.getServers()) {
-        ServerLoad load = status.getLoad(info);
-        StorageClusterStatusModel.Node node =
-          model.addLiveNode(
-            info.getHostname() + ":" +
-            Integer.toString(info.getPort()),
-            info.getStartcode(), load.getUsedHeapMB(),
-            load.getMaxHeapMB());
-        node.setRequests(load.getNumberOfRequests());
-        for (RegionLoad region: load.getRegionsLoad().values()) {
-          node.addRegion(region.getName(), region.getStores(),
-            region.getStorefiles(), region.getStorefileSizeMB(),
-            region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(),
-            region.getReadRequestsCount(), region.getWriteRequestsCount(),
-            region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(),
-            region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(),
-            region.getCurrentCompactedKVs());
-        }
-      }
-      for (ServerName name: status.getDeadServerNames()) {
-        model.addDeadNode(name.toString());
-      }
-      ResponseBuilder response = Response.ok(model);
-      response.cacheControl(cacheControl);
-      servlet.getMetrics().incrementSucessfulGetRequests(1);
-      return response.build();
-    } catch (IOException e) {
-      servlet.getMetrics().incrementFailedGetRequests(1);
-      return Response.status(Response.Status.SERVICE_UNAVAILABLE)
-        .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
-        .build();
-    }
-  }
-}
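
The status model above is reachable through the gateway root. A sketch follows, assuming RootResource (not part of this excerpt) mounts the resource at /status/cluster as in the rest of the HBase REST gateway, and that a gateway runs on localhost:8080.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class ClusterStatusExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      // The resource produces text, XML, JSON and protobuf; ask for XML here.
      Response response = client.get("/status/cluster", "text/xml");
      // 200 on success, 503 if the cluster status could not be fetched.
      if (response.getCode() == 200) {
        System.out.println(new String(response.getBody(), "UTF-8"));
      }
    } finally {
      client.shutdown();
    }
  }
}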

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
deleted file mode 100644
index 85e81f8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
-
-@InterfaceAudience.Private
-public class StorageClusterVersionResource extends ResourceBase {
-  private static final Log LOG =
-    LogFactory.getLog(StorageClusterVersionResource.class);
-
-  static CacheControl cacheControl;
-  static {
-    cacheControl = new CacheControl();
-    cacheControl.setNoCache(true);
-    cacheControl.setNoTransform(false);
-  }
-
-  /**
-   * Constructor
-   * @throws IOException
-   */
-  public StorageClusterVersionResource() throws IOException {
-    super();
-  }
-
-  @GET
-  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
-  public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    try {
-      StorageClusterVersionModel model = new StorageClusterVersionModel();
-      model.setVersion(servlet.getAdmin().getClusterStatus().getHBaseVersion());
-      ResponseBuilder response = Response.ok(model);
-      response.cacheControl(cacheControl);
-      servlet.getMetrics().incrementSucessfulGetRequests(1);
-      return response.build();
-    } catch (IOException e) {
-      servlet.getMetrics().incrementFailedGetRequests(1);
-      return Response.status(Response.Status.SERVICE_UNAVAILABLE)
-        .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
-        .build();
-    }
-  }
-}
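
VersionResource, later in this patch, dispatches /version/cluster to the resource above, which returns just the HBase version of the storage cluster. A short sketch, with the gateway address again an assumption:

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class ClusterVersionExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      // Note: unlike most resources here, only text, XML and JSON are produced.
      Response response = client.get("/version/cluster", "text/plain");
      System.out.println(new String(response.getBody(), "UTF-8"));
    } finally {
      client.shutdown();
    }
  }
}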

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
deleted file mode 100644
index caf1431..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.List;
-
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.Encoded;
-import javax.ws.rs.HeaderParam;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class TableResource extends ResourceBase {
-
-  String table;
-  private static final Log LOG = LogFactory.getLog(TableResource.class);
-
-  /**
-   * Constructor
-   * @param table
-   * @throws IOException
-   */
-  public TableResource(String table) throws IOException {
-    super();
-    this.table = table;
-  }
-
-  /** @return the table name */
-  String getName() {
-    return table;
-  }
-
-  /**
-   * @return true if the table exists
-   * @throws IOException
-   */
-  boolean exists() throws IOException {
-    return servlet.getAdmin().tableExists(table);
-  }
-
-  @Path("exists")
-  public ExistsResource getExistsResource() throws IOException {
-    return new ExistsResource(this);
-  }
-
-  @Path("regions")
-  public RegionsResource getRegionsResource() throws IOException {
-    return new RegionsResource(this);
-  }
-
-  @Path("scanner")
-  public ScannerResource getScannerResource() throws IOException {
-    return new ScannerResource(this);
-  }
-
-  @Path("schema")
-  public SchemaResource getSchemaResource() throws IOException {
-    return new SchemaResource(this);
-  }
-
-  @Path("multiget")
-  public MultiRowResource getMultipleRowResource(
-          final @QueryParam("v") String versions) throws IOException {
-    return new MultiRowResource(this, versions);
-  }
-
-  @Path("{rowspec: [^*]+}")
-  public RowResource getRowResource(
-      // We need the @Encoded decorator so Jersey won't urldecode before
-      // the RowSpec constructor has a chance to parse
-      final @PathParam("rowspec") @Encoded String rowspec,
-      final @QueryParam("v") String versions,
-      final @QueryParam("check") String check) throws IOException {
-    return new RowResource(this, rowspec, versions, check);
-  }
-
-  @Path("{suffixglobbingspec: .*\\*/.+}")
-  public RowResource getRowResourceWithSuffixGlobbing(
-      // We need the @Encoded decorator so Jersey won't urldecode before
-      // the RowSpec constructor has a chance to parse
-      final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec,
-      final @QueryParam("v") String versions,
-      final @QueryParam("check") String check) throws IOException {
-    return new RowResource(this, suffixglobbingspec, versions, check);
-  }
-
-  @Path("{scanspec: .*[*]$}")
-  public TableScanResource  getScanResource(
-      final @Context UriInfo uriInfo,
-      final @PathParam("scanspec") String scanSpec,
-      final @HeaderParam("Accept") String contentType,
-      @DefaultValue(Integer.MAX_VALUE + "")
-      @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
-      @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
-      @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
-      @DefaultValue("") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
-      @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
-      @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
-      @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
-      @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
-      @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks, 
-      @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String filters) {
-    try {
-      Filter filter = null;
-      if (scanSpec.indexOf('*') > 0) {
-        String prefix = scanSpec.substring(0, scanSpec.indexOf('*'));
-        filter = new PrefixFilter(Bytes.toBytes(prefix));
-      }
-      LOG.debug("Query parameters  : Table Name = > " + this.table + " Start Row => " + startRow
-          + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
-          + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
-          + maxVersions + " Batch Size => " + batchSize);
-      Table hTable = RESTServlet.getInstance().getTable(this.table);
-      Scan tableScan = new Scan();
-      tableScan.setBatch(batchSize);
-      tableScan.setMaxVersions(maxVersions);
-      tableScan.setTimeRange(startTime, endTime);
-      tableScan.setStartRow(Bytes.toBytes(startRow));
-      tableScan.setStopRow(Bytes.toBytes(endRow));
-      for (String csplit : column) {
-        String[] familysplit = csplit.trim().split(":");
-        if (familysplit.length == 2) {
-          if (familysplit[1].length() > 0) {
-            LOG.debug("Scan family and column : " + familysplit[0] + "  " + familysplit[1]);
-            tableScan.addColumn(Bytes.toBytes(familysplit[0]), Bytes.toBytes(familysplit[1]));
-          } else {
-            tableScan.addFamily(Bytes.toBytes(familysplit[0]));
-            LOG.debug("Scan family : " + familysplit[0] + " and empty qualifier.");
-            tableScan.addColumn(Bytes.toBytes(familysplit[0]), null);
-          }
-        } else if (StringUtils.isNotEmpty(familysplit[0])){
-          LOG.debug("Scan family : " + familysplit[0]);
-          tableScan.addFamily(Bytes.toBytes(familysplit[0]));
-        }
-      }
-      FilterList filterList = null;
-      if (StringUtils.isNotEmpty(filters)) {
-        ParseFilter pf = new ParseFilter();
-        Filter filterParam = pf.parseFilterString(filters);
-        if (filter != null) {
-          filterList = new FilterList(filter, filterParam);
-        } else {
-          filter = filterParam;
-        }
-      }
-      if (filterList != null) {
-        tableScan.setFilter(filterList);
-      } else if (filter != null) {
-        tableScan.setFilter(filter);
-      }
-      int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
-      tableScan.setCaching(fetchSize);
-      return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit);
-    } catch (Exception exp) {
-      servlet.getMetrics().incrementFailedScanRequests(1);
-      processException(exp);
-      LOG.warn(exp);
-      return null;
-    }
-  }
-}
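
The {scanspec} endpoint above turns a trailing-'*' path plus query parameters into a server-side Scan. A client-side sketch follows; the table name, the row prefix, and the literal query-parameter names ("limit", "maxversions") are assumptions about the values behind the Constants keys.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class TableScanExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      // Rows starting with "row" from table "mytable", capped at 5 rows and
      // one version per cell; the '*' suffix selects the scanspec endpoint,
      // which builds a PrefixFilter from the part before the '*'.
      Response response =
          client.get("/mytable/row*?limit=5&maxversions=1", "application/json");
      System.out.println(new String(response.getBody(), "UTF-8"));
    } finally {
      client.shutdown();
    }
  }
}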

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
deleted file mode 100644
index 5cc2c7b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.GET;
-import javax.ws.rs.HeaderParam;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.codehaus.jackson.annotate.JsonIgnore;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-@InterfaceAudience.Private
-public class TableScanResource  extends ResourceBase {
-
-  private static final Log LOG = LogFactory.getLog(TableScanResource.class);
-  TableResource tableResource;
-  ResultScanner results;
-  int userRequestedLimit;
-
-  public TableScanResource(ResultScanner scanner, int userRequestedLimit) throws IOException {
-    super();
-    this.results = scanner;
-    this.userRequestedLimit = userRequestedLimit;
-  }
-
-  @GET
-  @Produces({ Constants.MIMETYPE_XML, Constants.MIMETYPE_JSON })
-  public CellSetModelStream get(final @Context UriInfo uriInfo) {
-    servlet.getMetrics().incrementRequests(1);
-    final int rowsToSend = userRequestedLimit;
-    servlet.getMetrics().incrementSucessfulScanRequests(1);
-    final Iterator<Result> itr = results.iterator();
-    return new CellSetModelStream(new ArrayList<RowModel>() {
-      public Iterator<RowModel> iterator() {
-        return new Iterator<RowModel>() {
-          int count = rowsToSend;
-
-          @Override
-          public boolean hasNext() {
-            if (count > 0) {
-              return itr.hasNext();
-            } else {
-              return false;
-            }
-          }
-
-          @Override
-          public void remove() {
-            throw new UnsupportedOperationException(
-                "Remove method cannot be used in CellSetModelStream");
-          }
-
-          @Override
-          public RowModel next() {
-            Result rs = itr.next();
-            if ((rs == null) || (count <= 0)) {
-              return null;
-            }
-            byte[] rowKey = rs.getRow();
-            RowModel rModel = new RowModel(rowKey);
-            List<Cell> kvs = rs.listCells();
-            for (Cell kv : kvs) {
-              rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
-                  kv.getTimestamp(), CellUtil.cloneValue(kv)));
-            }
-            count--;
-            return rModel;
-          }
-        };
-      }
-    });
-  }
-
-  @GET
-  @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF })
-  public Response getProtobuf(
-      final @Context UriInfo uriInfo,
-      final @PathParam("scanspec") String scanSpec,
-      final @HeaderParam("Accept") String contentType,
-      @DefaultValue(Integer.MAX_VALUE + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
-      @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
-      @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
-      @DefaultValue("column") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
-      @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
-      @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
-      @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
-      @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
-      @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks) {
-    servlet.getMetrics().incrementRequests(1);
-    try {
-      int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
-      ProtobufStreamingUtil stream = new ProtobufStreamingUtil(this.results, contentType,
-          userRequestedLimit, fetchSize);
-      servlet.getMetrics().incrementSucessfulScanRequests(1);
-      ResponseBuilder response = Response.ok(stream);
-      response.header("content-type", contentType);
-      return response.build();
-    } catch (Exception exp) {
-      servlet.getMetrics().incrementFailedScanRequests(1);
-      processException(exp);
-      LOG.warn(exp);
-      return null;
-    }
-  }
-
-  @XmlRootElement(name = "CellSet")
-  @XmlAccessorType(XmlAccessType.FIELD)
-  public static class CellSetModelStream {
-    // JAXB needs an arraylist for streaming
-    @XmlElement(name = "Row")
-    @JsonIgnore
-    private ArrayList<RowModel> Row;
-
-    public CellSetModelStream() {
-    }
-
-    public CellSetModelStream(final ArrayList<RowModel> rowList) {
-      this.Row = rowList;
-    }
-
-    // jackson needs an iterator for streaming
-    @JsonProperty("Row")
-    public Iterator<RowModel> getIterator() {
-      return Row.iterator();
-    }
-  }
-}
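
The anonymous Iterator in get() above is what enforces the user-requested row limit while streaming. The same pattern in isolation, as a self-contained sketch with a plain Integer source standing in for the ResultScanner:

import java.util.Arrays;
import java.util.Iterator;

public class LimitedIterator<T> implements Iterator<T> {
  private final Iterator<T> source;
  private int remaining;

  public LimitedIterator(Iterator<T> source, int limit) {
    this.source = source;
    this.remaining = limit;
  }

  @Override
  public boolean hasNext() {
    // Stop reporting elements once the limit is exhausted, even if the
    // underlying source still has more.
    return remaining > 0 && source.hasNext();
  }

  @Override
  public T next() {
    remaining--;
    return source.next();
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  public static void main(String[] args) {
    Iterator<Integer> limited =
        new LimitedIterator<Integer>(Arrays.asList(1, 2, 3, 4, 5).iterator(), 3);
    while (limited.hasNext()) {
      System.out.println(limited.next()); // prints 1, 2, 3
    }
  }
}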

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
deleted file mode 100644
index ae93825..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.servlet.ServletContext;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.rest.model.VersionModel;
-
-/**
- * Implements REST software version reporting
- * <p>
- * <tt>/version/rest</tt>
- * <p>
- * <tt>/version</tt> (alias for <tt>/version/rest</tt>)
- */
-@InterfaceAudience.Private
-public class VersionResource extends ResourceBase {
-
-  private static final Log LOG = LogFactory.getLog(VersionResource.class);
-
-  static CacheControl cacheControl;
-  static {
-    cacheControl = new CacheControl();
-    cacheControl.setNoCache(true);
-    cacheControl.setNoTransform(false);
-  }
-
-  /**
-   * Constructor
-   * @throws IOException
-   */
-  public VersionResource() throws IOException {
-    super();
-  }
-
-  /**
-   * Build a response for a version request.
-   * @param context servlet context
-   * @param uriInfo (JAX-RS context variable) request URL
-   * @return a response for a version request 
-   */
-  @GET
-  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
-    MIMETYPE_PROTOBUF_IETF})
-  public Response get(final @Context ServletContext context, 
-      final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    ResponseBuilder response = Response.ok(new VersionModel(context));
-    response.cacheControl(cacheControl);
-    servlet.getMetrics().incrementSucessfulGetRequests(1);
-    return response.build();
-  }
-
-  /**
-   * Dispatch to StorageClusterVersionResource
-   */
-  @Path("cluster")
-  public StorageClusterVersionResource getClusterVersionResource() 
-      throws IOException {
-    return new StorageClusterVersionResource();
-  }
-
-  /**
-   * Dispatch <tt>/version/rest</tt> to self.
-   */
-  @Path("rest")
-  public VersionResource getVersionResource() {
-    return this;
-  }
-}
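
The version endpoint is a convenient place to see the content negotiation used throughout these resources. A sketch that fetches /version twice with different Accept headers; the gateway address is assumed as before.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class VersionExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      // Same resource, two representations chosen by the Accept header.
      for (String accept : new String[] { "text/plain", "application/json" }) {
        Response response = client.get("/version", accept);
        System.out.println(accept + " -> "
            + new String(response.getBody(), "UTF-8"));
      }
    } finally {
      client.shutdown();
    }
  }
}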

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
deleted file mode 100644
index ebedf57..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.commons.httpclient.HttpClient;
-import org.apache.commons.httpclient.HttpMethod;
-import org.apache.commons.httpclient.HttpVersion;
-import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
-import org.apache.commons.httpclient.URI;
-import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
-import org.apache.commons.httpclient.methods.DeleteMethod;
-import org.apache.commons.httpclient.methods.GetMethod;
-import org.apache.commons.httpclient.methods.HeadMethod;
-import org.apache.commons.httpclient.methods.PostMethod;
-import org.apache.commons.httpclient.methods.PutMethod;
-import org.apache.commons.httpclient.params.HttpClientParams;
-import org.apache.commons.httpclient.params.HttpConnectionManagerParams;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * A wrapper around HttpClient which provides some useful functionality and
- * semantics for interacting with the REST gateway.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Client {
-  public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];
-
-  private static final Log LOG = LogFactory.getLog(Client.class);
-
-  private HttpClient httpClient;
-  private Cluster cluster;
-  private boolean sslEnabled;
-
-  private Map<String, String> extraHeaders;
-
-  /**
-   * Default Constructor
-   */
-  public Client() {
-    this(null);
-  }
-
-  private void initialize(Cluster cluster, boolean sslEnabled) {
-    this.cluster = cluster;
-    this.sslEnabled = sslEnabled;
-    MultiThreadedHttpConnectionManager manager =
-      new MultiThreadedHttpConnectionManager();
-    HttpConnectionManagerParams managerParams = manager.getParams();
-    managerParams.setConnectionTimeout(2000); // 2 s
-    managerParams.setDefaultMaxConnectionsPerHost(10);
-    managerParams.setMaxTotalConnections(100);
-    extraHeaders = new ConcurrentHashMap<String, String>();
-    this.httpClient = new HttpClient(manager);
-    HttpClientParams clientParams = httpClient.getParams();
-    clientParams.setVersion(HttpVersion.HTTP_1_1);
-
-  }
-  /**
-   * Constructor
-   * @param cluster the cluster definition
-   */
-  public Client(Cluster cluster) {
-    initialize(cluster, false);
-  }
-
-  /**
-   * Constructor
-   * @param cluster the cluster definition
-   * @param sslEnabled enable SSL or not
-   */
-  public Client(Cluster cluster, boolean sslEnabled) {
-    initialize(cluster, sslEnabled);
-  }
-
-  /**
-   * Shut down the client. Close any open persistent connections. 
-   */
-  public void shutdown() {
-    MultiThreadedHttpConnectionManager manager = 
-      (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
-    manager.shutdown();
-  }
-
-  /**
-   * @return the wrapped HttpClient
-   */
-  public HttpClient getHttpClient() {
-    return httpClient;
-  }
-
-  /**
-   * Add an extra header. Extra headers are applied to every HTTP method
-   * until they are removed. If a header is no longer needed, the client
-   * must remove it explicitly.
-   */
-  public void addExtraHeader(final String name, final String value) {
-    extraHeaders.put(name, value);
-  }
-
-  /**
-   * Get an extra header value.
-   */
-  public String getExtraHeader(final String name) {
-    return extraHeaders.get(name);
-  }
-
-  /**
-   * Get all extra headers (read-only).
-   */
-  public Map<String, String> getExtraHeaders() {
-    return Collections.unmodifiableMap(extraHeaders);
-  }
-
-  /**
-   * Remove an extra header.
-   */
-  public void removeExtraHeader(final String name) {
-    extraHeaders.remove(name);
-  }
-
-  /**
-   * Execute a transaction method given only the path. Will select at random
-   * one of the members of the supplied cluster definition and iterate through
-   * the list until a transaction can be successfully completed. The
-   * definition of success here is a complete HTTP transaction, irrespective
-   * of result code.  
-   * @param cluster the cluster definition
-   * @param method the transaction method
-   * @param headers HTTP header values to send
-   * @param path the properly urlencoded path
-   * @return the HTTP response code
-   * @throws IOException
-   */
-  public int executePathOnly(Cluster cluster, HttpMethod method,
-      Header[] headers, String path) throws IOException {
-    IOException lastException;
-    if (cluster.nodes.size() < 1) {
-      throw new IOException("Cluster is empty");
-    }
-    int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random());
-    int i = start;
-    do {
-      cluster.lastHost = cluster.nodes.get(i);
-      try {
-        StringBuilder sb = new StringBuilder();
-        if (sslEnabled) {
-          sb.append("https://");
-        } else {
-          sb.append("http://");
-        }
-        sb.append(cluster.lastHost);
-        sb.append(path);
-        URI uri = new URI(sb.toString(), true);
-        return executeURI(method, headers, uri.toString());
-      } catch (IOException e) {
-        lastException = e;
-      }
-    } while ((i = (i + 1) % cluster.nodes.size()) != start);
-    throw lastException;
-  }
-
-  /**
-   * Execute a transaction method given a complete URI.
-   * @param method the transaction method
-   * @param headers HTTP header values to send
-   * @param uri a properly urlencoded URI
-   * @return the HTTP response code
-   * @throws IOException
-   */
-  public int executeURI(HttpMethod method, Header[] headers, String uri)
-      throws IOException {
-    method.setURI(new URI(uri, true));
-    for (Map.Entry<String, String> e: extraHeaders.entrySet()) {
-      method.addRequestHeader(e.getKey(), e.getValue());
-    }
-    if (headers != null) {
-      for (Header header: headers) {
-        method.addRequestHeader(header);
-      }
-    }
-    long startTime = System.currentTimeMillis();
-    int code = httpClient.executeMethod(method);
-    long endTime = System.currentTimeMillis();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(method.getName() + " " + uri + " " + code + " " +
-        method.getStatusText() + " in " + (endTime - startTime) + " ms");
-    }
-    return code;
-  }
-
-  /**
-   * Execute a transaction method. Calls <tt>executePathOnly</tt> if only a
-   * path is supplied in 'path', or <tt>executeURI</tt> if a complete URI is
-   * passed instead.
-   * @param cluster the cluster definition
-   * @param method the HTTP method
-   * @param headers HTTP header values to send
-   * @param path the properly urlencoded path or URI
-   * @return the HTTP response code
-   * @throws IOException
-   */
-  public int execute(Cluster cluster, HttpMethod method, Header[] headers,
-      String path) throws IOException {
-    if (path.startsWith("/")) {
-      return executePathOnly(cluster, method, headers, path);
-    }
-    return executeURI(method, headers, path);
-  }
-
-  /**
-   * @return the cluster definition
-   */
-  public Cluster getCluster() {
-    return cluster;
-  }
-
-  /**
-   * @param cluster the cluster definition
-   */
-  public void setCluster(Cluster cluster) {
-    this.cluster = cluster;
-  }
-
-  /**
-   * Send a HEAD request 
-   * @param path the path or URI
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response head(String path) throws IOException {
-    return head(cluster, path, null);
-  }
-
-  /**
-   * Send a HEAD request 
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @param headers the HTTP headers to include in the request
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response head(Cluster cluster, String path, Header[] headers) 
-      throws IOException {
-    HeadMethod method = new HeadMethod();
-    try {
-      int code = execute(cluster, method, null, path);
-      headers = method.getResponseHeaders();
-      return new Response(code, headers, null);
-    } finally {
-      method.releaseConnection();
-    }
-  }
-
-  /**
-   * Send a GET request 
-   * @param path the path or URI
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response get(String path) throws IOException {
-    return get(cluster, path);
-  }
-
-  /**
-   * Send a GET request 
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response get(Cluster cluster, String path) throws IOException {
-    return get(cluster, path, EMPTY_HEADER_ARRAY);
-  }
-
-  /**
-   * Send a GET request 
-   * @param path the path or URI
-   * @param accept Accept header value
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response get(String path, String accept) throws IOException {
-    return get(cluster, path, accept);
-  }
-
-  /**
-   * Send a GET request 
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @param accept Accept header value
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response get(Cluster cluster, String path, String accept)
-      throws IOException {
-    Header[] headers = new Header[1];
-    headers[0] = new Header("Accept", accept);
-    return get(cluster, path, headers);
-  }
-
-  /**
-   * Send a GET request
-   * @param path the path or URI
-   * @param headers the HTTP headers to include in the request, 
-   * <tt>Accept</tt> must be supplied
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response get(String path, Header[] headers) throws IOException {
-    return get(cluster, path, headers);
-  }
-
-  /**
-   * Send a GET request
-   * @param c the cluster definition
-   * @param path the path or URI
-   * @param headers the HTTP headers to include in the request
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response get(Cluster c, String path, Header[] headers) 
-      throws IOException {
-    GetMethod method = new GetMethod();
-    try {
-      int code = execute(c, method, headers, path);
-      headers = method.getResponseHeaders();
-      byte[] body = method.getResponseBody();
-      InputStream in = method.getResponseBodyAsStream();
-      return new Response(code, headers, body, in);
-    } finally {
-      method.releaseConnection();
-    }
-  }
-
-  /**
-   * Send a PUT request
-   * @param path the path or URI
-   * @param contentType the content MIME type
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response put(String path, String contentType, byte[] content)
-      throws IOException {
-    return put(cluster, path, contentType, content);
-  }
-
-  /**
-   * Send a PUT request
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @param contentType the content MIME type
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response put(Cluster cluster, String path, String contentType, 
-      byte[] content) throws IOException {
-    Header[] headers = new Header[1];
-    headers[0] = new Header("Content-Type", contentType);
-    return put(cluster, path, headers, content);
-  }
-
-  /**
-   * Send a PUT request
-   * @param path the path or URI
-   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
-   * supplied
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response put(String path, Header[] headers, byte[] content) 
-      throws IOException {
-    return put(cluster, path, headers, content);
-  }
-
-  /**
-   * Send a PUT request
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
-   * supplied
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response put(Cluster cluster, String path, Header[] headers, 
-      byte[] content) throws IOException {
-    PutMethod method = new PutMethod();
-    try {
-      method.setRequestEntity(new ByteArrayRequestEntity(content));
-      int code = execute(cluster, method, headers, path);
-      headers = method.getResponseHeaders();
-      content = method.getResponseBody();
-      return new Response(code, headers, content);
-    } finally {
-      method.releaseConnection();
-    }
-  }
-
-  /**
-   * Send a POST request
-   * @param path the path or URI
-   * @param contentType the content MIME type
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response post(String path, String contentType, byte[] content)
-      throws IOException {
-    return post(cluster, path, contentType, content);
-  }
-
-  /**
-   * Send a POST request
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @param contentType the content MIME type
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response post(Cluster cluster, String path, String contentType, 
-      byte[] content) throws IOException {
-    Header[] headers = new Header[1];
-    headers[0] = new Header("Content-Type", contentType);
-    return post(cluster, path, headers, content);
-  }
-
-  /**
-   * Send a POST request
-   * @param path the path or URI
-   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
-   * supplied
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response post(String path, Header[] headers, byte[] content) 
-      throws IOException {
-    return post(cluster, path, headers, content);
-  }
-
-  /**
-   * Send a POST request
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be
-   * supplied
-   * @param content the content bytes
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response post(Cluster cluster, String path, Header[] headers, 
-      byte[] content) throws IOException {
-    PostMethod method = new PostMethod();
-    try {
-      method.setRequestEntity(new ByteArrayRequestEntity(content));
-      int code = execute(cluster, method, headers, path);
-      headers = method.getResponseHeaders();
-      content = method.getResponseBody();
-      return new Response(code, headers, content);
-    } finally {
-      method.releaseConnection();
-    }
-  }
-
-  /**
-   * Send a DELETE request
-   * @param path the path or URI
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response delete(String path) throws IOException {
-    return delete(cluster, path);
-  }
-
-  /**
-   * Send a DELETE request
-   * @param cluster the cluster definition
-   * @param path the path or URI
-   * @return a Response object with response detail
-   * @throws IOException
-   */
-  public Response delete(Cluster cluster, String path) throws IOException {
-    DeleteMethod method = new DeleteMethod();
-    try {
-      int code = execute(cluster, method, null, path);
-      Header[] headers = method.getResponseHeaders();
-      byte[] content = method.getResponseBody();
-      return new Response(code, headers, content);
-    } finally {
-      method.releaseConnection();
-    }
-  }
-}
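
Putting the pieces together, here is a sketch that creates (or replaces) a table through SchemaResource using the put() overloads above. The gateway address, table name and column family are assumptions, and the XML payload follows the TableSchemaModel format consumed by that resource.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class SchemaPutExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    // Extra headers are sent with every request until removed explicitly.
    client.addExtraHeader("X-Request-Source", "schema-put-example");
    try {
      // Hypothetical table "mytable" with a single column family "cf".
      String schema =
          "<TableSchema name=\"mytable\"><ColumnSchema name=\"cf\"/></TableSchema>";
      Response response =
          client.put("/mytable/schema", "text/xml", schema.getBytes("UTF-8"));
      // SchemaResource answers 201 Created on success, 403 in read-only mode.
      System.out.println("HTTP " + response.getCode());
    } finally {
      client.removeExtraHeader("X-Request-Source");
      client.shutdown();
    }
  }
}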

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
deleted file mode 100644
index a2de329..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * A list of 'host:port' addresses of HTTP servers operating as a single
- * entity, for example multiple redundant web service gateways.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class Cluster {
-  protected List<String> nodes = 
-    Collections.synchronizedList(new ArrayList<String>());
-  protected String lastHost;
-
-  /**
-   * Constructor
-   */
-  public Cluster() {}
-
-  /**
-   * Constructor
-   * @param nodes a list of service locations, in 'host:port' format
-   */
-  public Cluster(List<String> nodes) {
-    this.nodes.addAll(nodes);
-  }
-
-  /**
-   * @return true if no locations have been added, false otherwise
-   */
-  public boolean isEmpty() {
-    return nodes.isEmpty();
-  }
-
-  /**
-   * Add a node to the cluster
-   * @param node the service location in 'host:port' format
-   * @return this cluster instance, for chaining
-   */
-  public Cluster add(String node) {
-    nodes.add(node);
-    return this;
-  }
-
-  /**
-   * Add a node to the cluster
-   * @param name host name
-   * @param port service port
-   * @return this cluster instance, for chaining
-   */
-  public Cluster add(String name, int port) {
-    StringBuilder sb = new StringBuilder();
-    sb.append(name);
-    sb.append(':');
-    sb.append(port);
-    return add(sb.toString());
-  }
-
-  /**
-   * Remove a node from the cluster
-   * @param node the service location in 'host:port' format
-   * @return this cluster instance, for chaining
-   */
-  public Cluster remove(String node) {
-    nodes.remove(node);
-    return this;
-  }
-
-  /**
-   * Remove a node from the cluster
-   * @param name host name
-   * @param port service port
-   * @return this cluster instance, for chaining
-   */
-  public Cluster remove(String name, int port) {
-    StringBuilder sb = new StringBuilder();
-    sb.append(name);
-    sb.append(':');
-    sb.append(port);
-    return remove(sb.toString());
-  }
-}

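Since Cluster is the entry point for everything above, a short sketch of its fluent list semantics (host names are placeholders; this assumes the corrected list-copying constructor):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.rest.client.Cluster;

    public class ClusterExample {
      public static void main(String[] args) {
        // Seed from a list, then grow and shrink with the fluent API.
        Cluster cluster = new Cluster(Arrays.asList("gw1.example.com:8080"));
        cluster.add("gw2.example.com", 8080)
               .remove("gw1.example.com:8080");
        System.out.println("empty? " + cluster.isEmpty());  // prints: empty? false
      }
    }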
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
deleted file mode 100644
index 2809ca9..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest.client;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.rest.Constants;
-import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
-import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
-import org.apache.hadoop.hbase.rest.model.TableListModel;
-import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
-import org.apache.hadoop.hbase.rest.model.VersionModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class RemoteAdmin {
-
-  final Client client;
-  final Configuration conf;
-  final String accessToken;
-  final int maxRetries;
-  final long sleepTime;
-
-  // This unmarshaller is necessary for getting the /version/cluster resource.
-  // This resource does not support protobufs. Therefore this is necessary to
-  // request/interpret it as XML.
-  private static volatile Unmarshaller versionClusterUnmarshaller;
-
-  /**
-   * Constructor
-   *
-   * @param client the REST client instance to use for requests
-   * @param conf the HBase configuration
-   */
-  public RemoteAdmin(Client client, Configuration conf) {
-    this(client, conf, null);
-  }
-
-  static Unmarshaller getUnmarshaller() throws JAXBException {
-
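-    // Benign race: concurrent callers may each create an Unmarshaller, but
-    // the volatile field ensures a fully constructed instance is published.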
-    if (versionClusterUnmarshaller == null) {
-      RemoteAdmin.versionClusterUnmarshaller = JAXBContext.newInstance(
-          StorageClusterVersionModel.class).createUnmarshaller();
-    }
-    return RemoteAdmin.versionClusterUnmarshaller;
-  }
-
-  /**
-   * Constructor
-   * @param client the REST client instance to use for requests
-   * @param conf the HBase configuration
-   * @param accessToken optional access token prepended to request paths, or null
-   */
-  public RemoteAdmin(Client client, Configuration conf, String accessToken) {
-    this.client = client;
-    this.conf = conf;
-    this.accessToken = accessToken;
-    this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
-    this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
-  }
-
-  /**
-   * @param tableName name of table to check
-   * @return true if all regions of the table are available
-   * @throws IOException if a remote or network exception occurs
-   */
-  public boolean isTableAvailable(String tableName) throws IOException {
-    return isTableAvailable(Bytes.toBytes(tableName));
-  }
-
-  /**
-   * @return a VersionModel representing the REST API's version
-   * @throws IOException
-   *           if the endpoint does not exist, there is a timeout, or some other
-   *           general failure mode
-   */
-  public VersionModel getRestVersion() throws IOException {
-
-    StringBuilder path = new StringBuilder();
-    path.append('/');
-    if (accessToken != null) {
-      path.append(accessToken);
-      path.append('/');
-    }
-
-    path.append("version/rest");
-
-    int code = 0;
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(path.toString(),
-          Constants.MIMETYPE_PROTOBUF);
-      code = response.getCode();
-      switch (code) {
-      case 200:
-        VersionModel v = new VersionModel();
-        return (VersionModel) v.getObjectFromMessage(response.getBody());
-      case 404:
-        throw new IOException("REST version not found");
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("get request to " + path.toString()
-            + " returned " + code);
-      }
-    }
-    throw new IOException("get request to " + path.toString() + " timed out");
-  }
-
-  /**
-   * @return a StorageClusterStatusModel representing the cluster's status
-   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode
-   */
-  public StorageClusterStatusModel getClusterStatus() throws IOException {
-
-    StringBuilder path = new StringBuilder();
-    path.append('/');
-    if (accessToken != null) {
-      path.append(accessToken);
-      path.append('/');
-    }
-
-    path.append("status/cluster");
-
-    int code = 0;
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(path.toString(),
-          Constants.MIMETYPE_PROTOBUF);
-      code = response.getCode();
-      switch (code) {
-      case 200:
-        StorageClusterStatusModel s = new StorageClusterStatusModel();
-        return (StorageClusterStatusModel) s.getObjectFromMessage(response
-            .getBody());
-      case 404:
-        throw new IOException("Cluster version not found");
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("get request to " + path + " returned " + code);
-      }
-    }
-    throw new IOException("get request to " + path + " timed out");
-  }
-
-  /**
-   * @return a StorageClusterVersionModel representing the cluster's version
-   * @throws IOException
-   *           if the endpoint does not exist, there is a timeout, or some other
-   *           general failure mode
-   */
-  public StorageClusterVersionModel getClusterVersion() throws IOException {
-
-    StringBuilder path = new StringBuilder();
-    path.append('/');
-    if (accessToken != null) {
-      path.append(accessToken);
-      path.append('/');
-    }
-
-    path.append("version/cluster");
-
-    int code = 0;
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(path.toString(), Constants.MIMETYPE_XML);
-      code = response.getCode();
-      switch (code) {
-      case 200:
-        try {
-          return (StorageClusterVersionModel) getUnmarshaller().unmarshal(
-              new ByteArrayInputStream(response.getBody()));
-        } catch (JAXBException jaxbe) {
-          throw new IOException(
-              "Issue parsing StorageClusterVersionModel object in XML form: "
-                  + jaxbe.getLocalizedMessage(), jaxbe);
-        }
-      case 404:
-        throw new IOException("Cluster version not found");
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("get request to " + path.toString() + " returned " + code);
-      }
-    }
-    throw new IOException("get request to " + path.toString()
-        + " request timed out");
-  }
-
-  /**
-   * @param tableName name of table to check
-   * @return true if all regions of the table are available
-   * @throws IOException if a remote or network exception occurs
-   */
-  public boolean isTableAvailable(byte[] tableName) throws IOException {
-    StringBuilder path = new StringBuilder();
-    path.append('/');
-    if (accessToken != null) {
-      path.append(accessToken);
-      path.append('/');
-    }
-    path.append(Bytes.toStringBinary(tableName));
-    path.append('/');
-    path.append("exists");
-    int code = 0;
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF);
-      code = response.getCode();
-      switch (code) {
-      case 200:
-        return true;
-      case 404:
-        return false;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("get request to " + path.toString() + " returned " + code);
-      }
-    }
-    throw new IOException("get request to " + path.toString() + " timed out");
-  }
-
-  /**
-   * Creates a new table.
-   * @param desc table descriptor for table
-   * @throws IOException if a remote or network exception occurs
-   */
-  public void createTable(HTableDescriptor desc)
-      throws IOException {
-    TableSchemaModel model = new TableSchemaModel(desc);
-    StringBuilder path = new StringBuilder();
-    path.append('/');
-    if (accessToken != null) {
-      path.append(accessToken);
-      path.append('/');
-    }
-    path.append(desc.getTableName());
-    path.append('/');
-    path.append("schema");
-    int code = 0;
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(path.toString(), Constants.MIMETYPE_PROTOBUF,
-        model.createProtobufOutput());
-      code = response.getCode();
-      switch (code) {
-      case 201:
-        return;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("create request to " + path.toString() + " returned " + code);
-      }
-    }
-    throw new IOException("create request to " + path.toString() + " timed out");
-  }
-
-  /**
-   * Deletes a table.
-   * @param tableName name of table to delete
-   * @throws IOException if a remote or network exception occurs
-   */
-  public void deleteTable(final String tableName) throws IOException {
-    deleteTable(Bytes.toBytes(tableName));
-  }
-
-  /**
-   * Deletes a table.
-   * @param tableName name of table to delete
-   * @throws IOException if a remote or network exception occurs
-   */
-  public void deleteTable(final byte [] tableName) throws IOException {
-    StringBuilder path = new StringBuilder();
-    path.append('/');
-    if (accessToken != null) {
-      path.append(accessToken);
-      path.append('/');
-    }
-    path.append(Bytes.toStringBinary(tableName));
-    path.append('/');
-    path.append("schema");
-    int code = 0;
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.delete(path.toString());
-      code = response.getCode();
-      switch (code) {
-      case 200:
-        return;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("delete request to " + path.toString() + " returned " + code);
-      }
-    }
-    throw new IOException("delete request to " + path.toString() + " timed out");
-  }
-
-  /**
-   * @return a TableListModel of the tables served by the cluster
-   * @throws IOException
-   *           if the endpoint does not exist, there is a timeout, or some other
-   *           general failure mode
-   */
-  public TableListModel getTableList() throws IOException {
-
-    StringBuilder path = new StringBuilder();
-    path.append('/');
-    if (accessToken != null) {
-      path.append(accessToken);
-      path.append('/');
-    }
-
-    int code = 0;
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(path.toString(),
-          Constants.MIMETYPE_PROTOBUF);
-      code = response.getCode();
-      switch (code) {
-      case 200:
-        TableListModel t = new TableListModel();
-        return (TableListModel) t.getObjectFromMessage(response.getBody());
-      case 404:
-        throw new IOException("Table list not found");
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("get request to " + path.toString()
-            + " request returned " + code);
-      }
-    }
-    throw new IOException("get request to " + path.toString()
-        + " request timed out");
-  }
-}


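Putting the removed classes together, a hypothetical end-to-end sketch of RemoteAdmin against a local gateway (the retry knobs read in the constructor above are hbase.rest.client.max.retries and hbase.rest.client.sleep; the table and family names are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Cluster;
    import org.apache.hadoop.hbase.rest.client.RemoteAdmin;

    public class RemoteAdminExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Client client = new Client(new Cluster().add("localhost", 8080));
        RemoteAdmin admin = new RemoteAdmin(client, conf);

        // Create a one-family table, confirm it is served, then drop it.
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
        desc.addFamily(new HColumnDescriptor("cf"));
        admin.createTable(desc);
        System.out.println("available? " + admin.isTableAvailable("demo"));
        admin.deleteTable("demo");
        client.shutdown();
      }
    }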