hbase-commits mailing list archives

From: apurt...@apache.org
Subject: svn commit: r928031 [2/5] - in /hadoop/hbase/trunk: contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ core/src/main/java/org/apache/hadoop/hbase/regionserver/...
Date: Fri, 26 Mar 2010 19:33:28 GMT
Modified: hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java Fri Mar 26 19:33:27 2010
@@ -1,179 +1,179 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.stargate.model.ScannerModel;
-import org.apache.hadoop.util.StringUtils;
-
-public class ScannerResultGenerator extends ResultGenerator {
-
-  private static final Log LOG =
-    LogFactory.getLog(ScannerResultGenerator.class);
-
-  public static Filter buildFilterFromModel(final ScannerModel model) 
-      throws Exception {
-    String filter = model.getFilter();
-    if (filter == null || filter.length() == 0) {
-      return null;
-    }
-    return buildFilter(filter);
-  }
-
-  private String id;
-  private Iterator<KeyValue> rowI;
-  private KeyValue cache;
-  private ResultScanner scanner;
-  private Result cached;
-
-  public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
-      final Filter filter) throws IllegalArgumentException, IOException {
-    HTablePool pool = RESTServlet.getInstance().getTablePool(); 
-    HTableInterface table = pool.getTable(tableName);
-    try {
-      Scan scan;
-      if (rowspec.hasEndRow()) {
-        scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
-      } else {
-        scan = new Scan(rowspec.getStartRow());
-      }
-      if (rowspec.hasColumns()) {
-        byte[][] columns = rowspec.getColumns();
-        for (byte[] column: columns) {
-          byte[][] split = KeyValue.parseColumn(column);
-          if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
-            scan.addColumn(split[0], split[1]);
-          } else {
-            scan.addFamily(split[0]);
-          }
-        }
-      } else {
-        for (HColumnDescriptor family: 
-            table.getTableDescriptor().getFamilies()) {
-          scan.addFamily(family.getName());
-        }
-      }
-      scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());          
-      scan.setMaxVersions(rowspec.getMaxVersions());
-      if (filter != null) {
-        scan.setFilter(filter);
-      }
-      // always disable block caching on the cluster when scanning
-      scan.setCacheBlocks(false);
-      scanner = table.getScanner(scan);
-      cached = null;
-      id = Long.toString(System.currentTimeMillis()) +
-             Integer.toHexString(scanner.hashCode());
-    } finally {
-      pool.putTable(table);
-    }
-  }
-
-  public String getID() {
-    return id;
-  }
-
-  public void close() {
-  }
-
-  public boolean hasNext() {
-    if (cache != null) {
-      return true;
-    }
-    if (rowI != null && rowI.hasNext()) {
-      return true;
-    }
-    if (cached != null) {
-      return true;
-    }
-    try {
-      Result result = scanner.next();
-      if (result != null && !result.isEmpty()) {
-        cached = result;
-      }
-    } catch (UnknownScannerException e) {
-      throw new IllegalArgumentException(e);
-    } catch (IOException e) {
-      LOG.error(StringUtils.stringifyException(e));
-    }
-    return cached != null;
-  }
-
-  public KeyValue next() {
-    if (cache != null) {
-      KeyValue kv = cache;
-      cache = null;
-      return kv;
-    }
-    boolean loop;
-    do {
-      loop = false;
-      if (rowI != null) {
-        if (rowI.hasNext()) {
-          return rowI.next();
-        } else {
-          rowI = null;
-        }
-      }
-      if (cached != null) {
-        rowI = cached.list().iterator();
-        loop = true;
-        cached = null;
-      } else {
-        Result result = null;
-        try {
-          result = scanner.next();
-        } catch (UnknownScannerException e) {
-          throw new IllegalArgumentException(e);
-        } catch (IOException e) {
-          LOG.error(StringUtils.stringifyException(e));
-        }
-        if (result != null && !result.isEmpty()) {
-          rowI = result.list().iterator();
-          loop = true;
-        }
-      }
-    } while (loop);
-    return null;
-  }
-
-  public void putBack(KeyValue kv) {
-    this.cache = kv;
-  }
-
-  public void remove() {
-    throw new UnsupportedOperationException("remove not supported");
-  }
-
-}
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+import org.apache.hadoop.util.StringUtils;
+
+public class ScannerResultGenerator extends ResultGenerator {
+
+  private static final Log LOG =
+    LogFactory.getLog(ScannerResultGenerator.class);
+
+  public static Filter buildFilterFromModel(final ScannerModel model) 
+      throws Exception {
+    String filter = model.getFilter();
+    if (filter == null || filter.length() == 0) {
+      return null;
+    }
+    return buildFilter(filter);
+  }
+
+  private String id;
+  private Iterator<KeyValue> rowI;
+  private KeyValue cache;
+  private ResultScanner scanner;
+  private Result cached;
+
+  public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+      final Filter filter) throws IllegalArgumentException, IOException {
+    HTablePool pool = RESTServlet.getInstance().getTablePool(); 
+    HTableInterface table = pool.getTable(tableName);
+    try {
+      Scan scan;
+      if (rowspec.hasEndRow()) {
+        scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
+      } else {
+        scan = new Scan(rowspec.getStartRow());
+      }
+      if (rowspec.hasColumns()) {
+        byte[][] columns = rowspec.getColumns();
+        for (byte[] column: columns) {
+          byte[][] split = KeyValue.parseColumn(column);
+          if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
+            scan.addColumn(split[0], split[1]);
+          } else {
+            scan.addFamily(split[0]);
+          }
+        }
+      } else {
+        for (HColumnDescriptor family: 
+            table.getTableDescriptor().getFamilies()) {
+          scan.addFamily(family.getName());
+        }
+      }
+      scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());          
+      scan.setMaxVersions(rowspec.getMaxVersions());
+      if (filter != null) {
+        scan.setFilter(filter);
+      }
+      // always disable block caching on the cluster when scanning
+      scan.setCacheBlocks(false);
+      scanner = table.getScanner(scan);
+      cached = null;
+      id = Long.toString(System.currentTimeMillis()) +
+             Integer.toHexString(scanner.hashCode());
+    } finally {
+      pool.putTable(table);
+    }
+  }
+
+  public String getID() {
+    return id;
+  }
+
+  public void close() {
+  }
+
+  public boolean hasNext() {
+    if (cache != null) {
+      return true;
+    }
+    if (rowI != null && rowI.hasNext()) {
+      return true;
+    }
+    if (cached != null) {
+      return true;
+    }
+    try {
+      Result result = scanner.next();
+      if (result != null && !result.isEmpty()) {
+        cached = result;
+      }
+    } catch (UnknownScannerException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      LOG.error(StringUtils.stringifyException(e));
+    }
+    return cached != null;
+  }
+
+  public KeyValue next() {
+    if (cache != null) {
+      KeyValue kv = cache;
+      cache = null;
+      return kv;
+    }
+    boolean loop;
+    do {
+      loop = false;
+      if (rowI != null) {
+        if (rowI.hasNext()) {
+          return rowI.next();
+        } else {
+          rowI = null;
+        }
+      }
+      if (cached != null) {
+        rowI = cached.list().iterator();
+        loop = true;
+        cached = null;
+      } else {
+        Result result = null;
+        try {
+          result = scanner.next();
+        } catch (UnknownScannerException e) {
+          throw new IllegalArgumentException(e);
+        } catch (IOException e) {
+          LOG.error(StringUtils.stringifyException(e));
+        }
+        if (result != null && !result.isEmpty()) {
+          rowI = result.list().iterator();
+          loop = true;
+        }
+      }
+    } while (loop);
+    return null;
+  }
+
+  public void putBack(KeyValue kv) {
+    this.cache = kv;
+  }
+
+  public void remove() {
+    throw new UnsupportedOperationException("remove not supported");
+  }
+
+}
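
For context, the Scan construction above uses the plain HBase client API; a
standalone sketch of the same pattern follows (table, row, and column names
here are hypothetical, not part of this commit):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "mytable");
        // half-open row range, like the rowspec.hasEndRow() branch above
        Scan scan = new Scan(Bytes.toBytes("row0"), Bytes.toBytes("row9"));
        scan.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"));
        scan.setMaxVersions(1);
        scan.setCacheBlocks(false); // same choice as above: don't pollute the block cache
        ResultScanner scanner = table.getScanner(scan);
        try {
          for (Result result: scanner) {
            System.out.println(Bytes.toString(result.getRow()));
          }
        } finally {
          scanner.close();
        }
      }
    }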

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java Fri Mar 26 19:33:27 2010
@@ -1,259 +1,259 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.util.Map;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.xml.namespace.QName;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
-import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-public class SchemaResource implements Constants {
-  private static final Log LOG = LogFactory.getLog(SchemaResource.class);
-
-  User user;
-  String tableName;
-  String actualTableName;
-  CacheControl cacheControl;
-  RESTServlet servlet;
-
-  public SchemaResource(User user, String table) throws IOException {
-    if (user != null) {
-      this.user = user;
-      this.actualTableName = 
-        !user.isAdmin() ? (user.getName() + "." + table) : table;
-    } else {
-      this.actualTableName = table;
-    }
-    this.tableName = table;
-    servlet = RESTServlet.getInstance();
-    cacheControl = new CacheControl();
-    cacheControl.setNoCache(true);
-    cacheControl.setNoTransform(false);
-  }
-
-  private HTableDescriptor getTableSchema() throws IOException,
-      TableNotFoundException {
-    HTablePool pool = servlet.getTablePool();
-    HTableInterface table = pool.getTable(actualTableName);
-    try {
-      return table.getTableDescriptor();
-    } finally {
-      pool.putTable(table);
-    }
-  }
-
-  @GET
-  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
-  public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    try {
-      HTableDescriptor htd = getTableSchema();
-      TableSchemaModel model = new TableSchemaModel();
-      model.setName(tableName);
-      for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
-          htd.getValues().entrySet()) {
-        model.addAttribute(Bytes.toString(e.getKey().get()), 
-            Bytes.toString(e.getValue().get()));
-      }
-      for (HColumnDescriptor hcd: htd.getFamilies()) {
-        ColumnSchemaModel columnModel = new ColumnSchemaModel();
-        columnModel.setName(hcd.getNameAsString());
-        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
-            hcd.getValues().entrySet()) {
-          columnModel.addAttribute(Bytes.toString(e.getKey().get()),
-            Bytes.toString(e.getValue().get()));
-        }
-        model.addColumnFamily(columnModel);
-      }
-      ResponseBuilder response = Response.ok(model);
-      response.cacheControl(cacheControl);
-      return response.build();
-    } catch (TableNotFoundException e) {
-      throw new WebApplicationException(Response.Status.NOT_FOUND);
-    } catch (IOException e) {
-      throw new WebApplicationException(e,
-                  Response.Status.SERVICE_UNAVAILABLE);
-    }
-  }
-
-  private Response replace(final byte[] tableName, 
-      final TableSchemaModel model, final UriInfo uriInfo,
-      final HBaseAdmin admin) {
-    try {
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
-        htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
-      }
-      for (ColumnSchemaModel family: model.getColumns()) {
-        HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
-        for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
-          hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
-        }
-        htd.addFamily(hcd);
-      }
-      if (admin.tableExists(tableName)) {
-        admin.disableTable(tableName);
-        admin.modifyTable(tableName, htd);
-        admin.enableTable(tableName);
-      } else try {
-        admin.createTable(htd);
-      } catch (TableExistsException e) {
-        // race, someone else created a table with the same name
-        throw new WebApplicationException(e, Response.Status.NOT_MODIFIED);
-      }
-      return Response.created(uriInfo.getAbsolutePath()).build();
-    } catch (IOException e) {
-      throw new WebApplicationException(e, 
-            Response.Status.SERVICE_UNAVAILABLE);
-    }      
-  } 
-
-  private Response update(final byte[] tableName,final TableSchemaModel model,
-      final UriInfo uriInfo, final HBaseAdmin admin) {
-    try {
-      HTableDescriptor htd = admin.getTableDescriptor(tableName);
-      admin.disableTable(tableName);
-      try {
-        for (ColumnSchemaModel family: model.getColumns()) {
-          HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
-          for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
-            hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
-          }
-          if (htd.hasFamily(hcd.getName())) {
-            admin.modifyColumn(tableName, hcd.getName(), hcd);
-          } else {
-            admin.addColumn(tableName, hcd);
-          }
-        }
-      } catch (IOException e) {
-        throw new WebApplicationException(e, 
-            Response.Status.INTERNAL_SERVER_ERROR);
-      } finally {
-        admin.enableTable(tableName);
-      }
-      return Response.ok().build();
-    } catch (IOException e) {
-      throw new WebApplicationException(e,
-          Response.Status.SERVICE_UNAVAILABLE);
-    }
-  }
-
-  private Response update(final TableSchemaModel model, final boolean replace,
-      final UriInfo uriInfo) {
-    try {
-      servlet.invalidateMaxAge(tableName);
-      byte[] tableName = Bytes.toBytes(actualTableName);
-      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
-      if (replace || !admin.tableExists(tableName)) {
-        return replace(tableName, model, uriInfo, admin);
-      } else {
-        return update(tableName, model, uriInfo, admin);
-      }
-    } catch (IOException e) {
-      throw new WebApplicationException(e, 
-            Response.Status.SERVICE_UNAVAILABLE);
-    }
-  }
-
-  @PUT
-  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
-  public Response put(final TableSchemaModel model, 
-      final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    // use the name given in the path, but warn if the name on the path and
-    // the name in the schema are different
-    if (!tableName.equals(model.getName())) {
-      LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
-        model.getName() + "'");
-    }
-    return update(model, true, uriInfo);
-  }
-
-  @POST
-  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
-  public Response post(final TableSchemaModel model, 
-      final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("POST " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    // use the name given in the path, but warn if the name on the path and
-    // the name in the schema are different
-    if (!tableName.equals(model.getName())) {
-      LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
-        model.getName() + "'");
-    }
-    return update(model, false, uriInfo);
-  }
-
-  @DELETE
-  public Response delete(final @Context UriInfo uriInfo) {     
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    try {
-      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
-      admin.disableTable(actualTableName);
-      admin.deleteTable(actualTableName);
-      return Response.ok().build();
-    } catch (TableNotFoundException e) {
-      throw new WebApplicationException(Response.Status.NOT_FOUND);
-    } catch (IOException e) {
-      throw new WebApplicationException(e, 
-            Response.Status.SERVICE_UNAVAILABLE);
-    }
-  }
-
-}
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.xml.namespace.QName;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class SchemaResource implements Constants {
+  private static final Log LOG = LogFactory.getLog(SchemaResource.class);
+
+  User user;
+  String tableName;
+  String actualTableName;
+  CacheControl cacheControl;
+  RESTServlet servlet;
+
+  public SchemaResource(User user, String table) throws IOException {
+    if (user != null) {
+      this.user = user;
+      this.actualTableName = 
+        !user.isAdmin() ? (user.getName() + "." + table) : table;
+    } else {
+      this.actualTableName = table;
+    }
+    this.tableName = table;
+    servlet = RESTServlet.getInstance();
+    cacheControl = new CacheControl();
+    cacheControl.setNoCache(true);
+    cacheControl.setNoTransform(false);
+  }
+
+  private HTableDescriptor getTableSchema() throws IOException,
+      TableNotFoundException {
+    HTablePool pool = servlet.getTablePool();
+    HTableInterface table = pool.getTable(actualTableName);
+    try {
+      return table.getTableDescriptor();
+    } finally {
+      pool.putTable(table);
+    }
+  }
+
+  @GET
+  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+  public Response get(final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    try {
+      HTableDescriptor htd = getTableSchema();
+      TableSchemaModel model = new TableSchemaModel();
+      model.setName(tableName);
+      for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+          htd.getValues().entrySet()) {
+        model.addAttribute(Bytes.toString(e.getKey().get()), 
+            Bytes.toString(e.getValue().get()));
+      }
+      for (HColumnDescriptor hcd: htd.getFamilies()) {
+        ColumnSchemaModel columnModel = new ColumnSchemaModel();
+        columnModel.setName(hcd.getNameAsString());
+        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+            hcd.getValues().entrySet()) {
+          columnModel.addAttribute(Bytes.toString(e.getKey().get()),
+            Bytes.toString(e.getValue().get()));
+        }
+        model.addColumnFamily(columnModel);
+      }
+      ResponseBuilder response = Response.ok(model);
+      response.cacheControl(cacheControl);
+      return response.build();
+    } catch (TableNotFoundException e) {
+      throw new WebApplicationException(Response.Status.NOT_FOUND);
+    } catch (IOException e) {
+      throw new WebApplicationException(e,
+                  Response.Status.SERVICE_UNAVAILABLE);
+    }
+  }
+
+  private Response replace(final byte[] tableName, 
+      final TableSchemaModel model, final UriInfo uriInfo,
+      final HBaseAdmin admin) {
+    try {
+      HTableDescriptor htd = new HTableDescriptor(tableName);
+      for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
+        htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+      }
+      for (ColumnSchemaModel family: model.getColumns()) {
+        HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+        for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+          hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+        }
+        htd.addFamily(hcd);
+      }
+      if (admin.tableExists(tableName)) {
+        admin.disableTable(tableName);
+        admin.modifyTable(tableName, htd);
+        admin.enableTable(tableName);
+      } else try {
+        admin.createTable(htd);
+      } catch (TableExistsException e) {
+        // race, someone else created a table with the same name
+        throw new WebApplicationException(e, Response.Status.NOT_MODIFIED);
+      }
+      return Response.created(uriInfo.getAbsolutePath()).build();
+    } catch (IOException e) {
+      throw new WebApplicationException(e, 
+            Response.Status.SERVICE_UNAVAILABLE);
+    }      
+  } 
+
+  private Response update(final byte[] tableName,final TableSchemaModel model,
+      final UriInfo uriInfo, final HBaseAdmin admin) {
+    try {
+      HTableDescriptor htd = admin.getTableDescriptor(tableName);
+      admin.disableTable(tableName);
+      try {
+        for (ColumnSchemaModel family: model.getColumns()) {
+          HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+          for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+            hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+          }
+          if (htd.hasFamily(hcd.getName())) {
+            admin.modifyColumn(tableName, hcd.getName(), hcd);
+          } else {
+            admin.addColumn(tableName, hcd);
+          }
+        }
+      } catch (IOException e) {
+        throw new WebApplicationException(e, 
+            Response.Status.INTERNAL_SERVER_ERROR);
+      } finally {
+        admin.enableTable(tableName);
+      }
+      return Response.ok().build();
+    } catch (IOException e) {
+      throw new WebApplicationException(e,
+          Response.Status.SERVICE_UNAVAILABLE);
+    }
+  }
+
+  private Response update(final TableSchemaModel model, final boolean replace,
+      final UriInfo uriInfo) {
+    try {
+      servlet.invalidateMaxAge(tableName);
+      byte[] tableName = Bytes.toBytes(actualTableName);
+      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
+      if (replace || !admin.tableExists(tableName)) {
+        return replace(tableName, model, uriInfo, admin);
+      } else {
+        return update(tableName, model, uriInfo, admin);
+      }
+    } catch (IOException e) {
+      throw new WebApplicationException(e, 
+            Response.Status.SERVICE_UNAVAILABLE);
+    }
+  }
+
+  @PUT
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+  public Response put(final TableSchemaModel model, 
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    // use the name given in the path, but warn if the name on the path and
+    // the name in the schema are different
+    if (!tableName.equals(model.getName())) {
+      LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
+        model.getName() + "'");
+    }
+    return update(model, true, uriInfo);
+  }
+
+  @POST
+  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+  public Response post(final TableSchemaModel model, 
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("POST " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    // use the name given in the path, but warn if the name on the path and
+    // the name in the schema are different
+    if (!tableName.equals(model.getName())) {
+      LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
+        model.getName() + "'");
+    }
+    return update(model, false, uriInfo);
+  }
+
+  @DELETE
+  public Response delete(final @Context UriInfo uriInfo) {     
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    try {
+      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
+      admin.disableTable(actualTableName);
+      admin.deleteTable(actualTableName);
+      return Response.ok().build();
+    } catch (TableNotFoundException e) {
+      throw new WebApplicationException(Response.Status.NOT_FOUND);
+    } catch (IOException e) {
+      throw new WebApplicationException(e, 
+            Response.Status.SERVICE_UNAVAILABLE);
+    }
+  }
+
+}
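
The replace/update paths above reduce to standard HBaseAdmin calls; a minimal
sketch of that logic, with hypothetical table and family names:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SchemaSketch {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        HTableDescriptor htd = new HTableDescriptor("mytable");
        htd.addFamily(new HColumnDescriptor("info"));
        if (admin.tableExists("mytable")) {
          // schema changes require taking the table offline first
          admin.disableTable("mytable");
          admin.modifyTable(Bytes.toBytes("mytable"), htd);
          admin.enableTable("mytable");
        } else {
          admin.createTable(htd);
        }
      }
    }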

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java Fri Mar 26 19:33:27 2010
@@ -1,69 +1,69 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Response;
-
-import org.apache.hadoop.hbase.stargate.User;
-
-public class TableResource implements Constants {
-
-  User user;
-  String table;
-
-  public TableResource(User user, String table) {
-    this.user = user;
-    this.table = table;
-  }
-
-  @Path("regions")
-  public RegionsResource getRegionsResource() throws IOException {
-    return new RegionsResource(user, table);
-  }
-
-  @Path("scanner")
-  public ScannerResource getScannerResource() throws IOException {
-    return new ScannerResource(user, table);
-  }
-
-  @Path("schema")
-  public SchemaResource getSchemaResource() throws IOException {
-    return new SchemaResource(user, table);
-  }
-
-  @Path("{rowspec: .+}")
-  public RowResource getRowResource(
-      final @PathParam("rowspec") String rowspec,
-      final @QueryParam("v") String versions) {
-    try {
-      return new RowResource(user, table, rowspec, versions);
-    } catch (IOException e) {
-      throw new WebApplicationException(e, 
-                  Response.Status.INTERNAL_SERVER_ERROR);
-    }
-  }
-}
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Response;
+
+import org.apache.hadoop.hbase.stargate.User;
+
+public class TableResource implements Constants {
+
+  User user;
+  String table;
+
+  public TableResource(User user, String table) {
+    this.user = user;
+    this.table = table;
+  }
+
+  @Path("regions")
+  public RegionsResource getRegionsResource() throws IOException {
+    return new RegionsResource(user, table);
+  }
+
+  @Path("scanner")
+  public ScannerResource getScannerResource() throws IOException {
+    return new ScannerResource(user, table);
+  }
+
+  @Path("schema")
+  public SchemaResource getSchemaResource() throws IOException {
+    return new SchemaResource(user, table);
+  }
+
+  @Path("{rowspec: .+}")
+  public RowResource getRowResource(
+      final @PathParam("rowspec") String rowspec,
+      final @QueryParam("v") String versions) {
+    try {
+      return new RowResource(user, table, rowspec, versions);
+    } catch (IOException e) {
+      throw new WebApplicationException(e, 
+                  Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+}
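
TableResource is a JAX-RS sub-resource locator: requests under a table's path
are routed to /<table>/regions, /<table>/scanner, /<table>/schema, or a row
resource for anything else. A plain-HTTP sketch of one such route (host, port,
and table name are hypothetical):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SchemaGetSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/mytable/schema");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "text/plain"); // or XML/JSON/protobuf
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);
        }
        in.close();
      }
    }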

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java Fri Mar 26 19:33:27 2010
@@ -1,94 +1,94 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.servlet.ServletContext;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.stargate.model.VersionModel;
-
-/**
- * Implements Stargate software version reporting via
- * <p>
- * <tt>/version/stargate</tt>
- * <p>
- * <tt>/version</tt> (alias for <tt>/version/stargate</tt>)
- */
-public class VersionResource implements Constants {
-  private static final Log LOG = LogFactory.getLog(VersionResource.class);
-
-  private CacheControl cacheControl;
-  private RESTServlet servlet;
-
-  public VersionResource() throws IOException {
-    servlet = RESTServlet.getInstance();
-    cacheControl = new CacheControl();
-    cacheControl.setNoCache(true);
-    cacheControl.setNoTransform(false);
-  }
-
-  /**
-   * Build a response for a version request.
-   * @param context servlet context
-   * @param uriInfo (JAX-RS context variable) request URL
-   * @return a response for a version request 
-   */
-  @GET
-  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
-  public Response get(final @Context ServletContext context, 
-      final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
-    }
-    servlet.getMetrics().incrementRequests(1);
-    ResponseBuilder response = Response.ok(new VersionModel(context));
-    response.cacheControl(cacheControl);
-    return response.build();
-  }
-
-  /**
-   * Dispatch to StorageClusterVersionResource
-   */
-  @Path("cluster")
-  public StorageClusterVersionResource getClusterVersionResource() 
-      throws IOException {
-    return new StorageClusterVersionResource();
-  }
-
-  /**
-   * Dispatch <tt>/version/stargate</tt> to self.
-   */
-  @Path("stargate")
-  public VersionResource getVersionResource() {
-    return this;
-  }
-}
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.servlet.ServletContext;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.stargate.model.VersionModel;
+
+/**
+ * Implements Stargate software version reporting via
+ * <p>
+ * <tt>/version/stargate</tt>
+ * <p>
+ * <tt>/version</tt> (alias for <tt>/version/stargate</tt>)
+ */
+public class VersionResource implements Constants {
+  private static final Log LOG = LogFactory.getLog(VersionResource.class);
+
+  private CacheControl cacheControl;
+  private RESTServlet servlet;
+
+  public VersionResource() throws IOException {
+    servlet = RESTServlet.getInstance();
+    cacheControl = new CacheControl();
+    cacheControl.setNoCache(true);
+    cacheControl.setNoTransform(false);
+  }
+
+  /**
+   * Build a response for a version request.
+   * @param context servlet context
+   * @param uriInfo (JAX-RS context variable) request URL
+   * @return a response for a version request 
+   */
+  @GET
+  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+  public Response get(final @Context ServletContext context, 
+      final @Context UriInfo uriInfo) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    }
+    servlet.getMetrics().incrementRequests(1);
+    ResponseBuilder response = Response.ok(new VersionModel(context));
+    response.cacheControl(cacheControl);
+    return response.build();
+  }
+
+  /**
+   * Dispatch to StorageClusterVersionResource
+   */
+  @Path("cluster")
+  public StorageClusterVersionResource getClusterVersionResource() 
+      throws IOException {
+    return new StorageClusterVersionResource();
+  }
+
+  /**
+   * Dispatch <tt>/version/stargate</tt> to self.
+   */
+  @Path("stargate")
+  public VersionResource getVersionResource() {
+    return this;
+  }
+}

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,26 +1,26 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message Cell {
-  optional bytes row = 1;       // unused if Cell is in a CellSet
-  optional bytes column = 2;
-  optional int64 timestamp = 3;
-  optional bytes data = 4;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Cell {
+  optional bytes row = 1;       // unused if Cell is in a CellSet
+  optional bytes column = 2;
+  optional int64 timestamp = 3;
+  optional bytes data = 4;
+}
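
With no java_outer_classname option, protoc derives the outer class from the
file name, so the generated type should be
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell. A sketch
of building one Cell (values hypothetical):

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;

    public class CellSketch {
      public static void main(String[] args) {
        Cell cell = Cell.newBuilder()
            .setRow(ByteString.copyFromUtf8("row1")) // omitted inside a CellSet
            .setColumn(ByteString.copyFromUtf8("info:name"))
            .setTimestamp(1269631200000L)
            .setData(ByteString.copyFromUtf8("value"))
            .build();
        byte[] wire = cell.toByteArray(); // the MIMETYPE_PROTOBUF payload
        System.out.println(wire.length + " bytes");
      }
    }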

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,29 +1,29 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import "CellMessage.proto";
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message CellSet {
-  message Row {
-    required bytes key = 1;
-    repeated Cell values = 2;
-  }
-  repeated Row rows = 1;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "CellMessage.proto";
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message CellSet {
+  message Row {
+    required bytes key = 1;
+    repeated Cell values = 2;
+  }
+  repeated Row rows = 1;
+}
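
Going the other direction, a response body in this format can be decoded with
the generated parser; a sketch (body is a hypothetical byte[] read from an
HTTP response):

    import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet;

    public class CellSetSketch {
      static void dump(byte[] body) throws Exception {
        CellSet cellSet = CellSet.parseFrom(body);
        for (CellSet.Row row: cellSet.getRowsList()) {
          System.out.println("row " + row.getKey().toStringUtf8());
          for (Cell cell: row.getValuesList()) {
            // per CellMessage.proto, Cell.row is unused here; the key comes from Row
            System.out.println("  " + cell.getColumn().toStringUtf8() +
                " @ " + cell.getTimestamp());
          }
        }
      }
    }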

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,32 +1,32 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message ColumnSchema {
-  optional string name = 1;
-  message Attribute {
-    required string name = 1;
-    required string value = 2;
-  }
-  repeated Attribute attrs = 2;
-  // optional helpful encodings of commonly used attributes
-  optional int32 ttl = 3;
-  optional int32 maxVersions = 4;
-  optional string compression = 5;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message ColumnSchema {
+  optional string name = 1;
+  message Attribute {
+    required string name = 1;
+    required string value = 2;
+  }
+  repeated Attribute attrs = 2;
+  // optional helpful encodings of commonly used attributes
+  optional int32 ttl = 3;
+  optional int32 maxVersions = 4;
+  optional string compression = 5;
+}

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,30 +1,30 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message Scanner {
-  optional bytes startRow = 1;
-  optional bytes endRow = 2;
-  repeated bytes columns = 3;
-  optional int32 batch = 4;
-  optional int64 startTime = 5;
-  optional int64 endTime = 6;
-  optional int32 maxVersions = 7;
-  optional string filter = 8;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Scanner {
+  optional bytes startRow = 1;
+  optional bytes endRow = 2;
+  repeated bytes columns = 3;
+  optional int32 batch = 4;
+  optional int64 startTime = 5;
+  optional int64 endTime = 6;
+  optional int32 maxVersions = 7;
+  optional string filter = 8;
+}
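
This message is the scanner specification a client submits to open a
server-side scanner (a POST to /<table>/scanner in the Stargate API); a sketch
with hypothetical values:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;

    public class ScannerSpecSketch {
      public static void main(String[] args) {
        Scanner spec = Scanner.newBuilder()
            .setStartRow(ByteString.copyFromUtf8("row0"))
            .setEndRow(ByteString.copyFromUtf8("row9"))
            .addColumns(ByteString.copyFromUtf8("info:name"))
            .setBatch(100) // cells returned per fetch
            .setMaxVersions(1)
            .build();
        System.out.println(spec.toByteArray().length + " bytes to POST");
      }
    }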

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,45 +1,45 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message StorageClusterStatus {
-  message Region {
-    required bytes name = 1;
-    optional int32 stores = 2;
-    optional int32 storefiles = 3;
-    optional int32 storefileSizeMB = 4;
-    optional int32 memstoreSizeMB = 5;
-    optional int32 storefileIndexSizeMB = 6;
-  }
-  message Node {
-    required string name = 1;    // name:port
-    optional int64 startCode = 2;
-    optional int32 requests = 3;
-    optional int32 heapSizeMB = 4;
-    optional int32 maxHeapSizeMB = 5;
-    repeated Region regions = 6;
-  }
-  // node status
-  repeated Node liveNodes = 1;
-  repeated string deadNodes = 2;
-  // summary statistics
-  optional int32 regions = 3; 
-  optional int32 requests = 4; 
-  optional double averageLoad = 5;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message StorageClusterStatus {
+  message Region {
+    required bytes name = 1;
+    optional int32 stores = 2;
+    optional int32 storefiles = 3;
+    optional int32 storefileSizeMB = 4;
+    optional int32 memstoreSizeMB = 5;
+    optional int32 storefileIndexSizeMB = 6;
+  }
+  message Node {
+    required string name = 1;    // name:port
+    optional int64 startCode = 2;
+    optional int32 requests = 3;
+    optional int32 heapSizeMB = 4;
+    optional int32 maxHeapSizeMB = 5;
+    repeated Region regions = 6;
+  }
+  // node status
+  repeated Node liveNodes = 1;
+  repeated string deadNodes = 2;
+  // summary statistics
+  optional int32 regions = 3; 
+  optional int32 requests = 4; 
+  optional double averageLoad = 5;
+}
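
A hedged sketch of consuming this status message on the client side; it assumes
only the protoc default outer class name StorageClusterStatusMessage and walks
the nested Node and Region messages defined above:

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

    public class ClusterStatusExample {
      public static void print(byte[] wire) throws InvalidProtocolBufferException {
        StorageClusterStatus status = StorageClusterStatus.parseFrom(wire);
        // Summary statistics (fields 3-5).
        System.out.println("regions=" + status.getRegions()
            + " requests=" + status.getRequests()
            + " averageLoad=" + status.getAverageLoad());
        // liveNodes is repeated; each Node carries its own repeated regions.
        for (StorageClusterStatus.Node node : status.getLiveNodesList()) {
          System.out.println(node.getName() + " heapMB=" + node.getHeapSizeMB());
          for (StorageClusterStatus.Region region : node.getRegionsList()) {
            System.out.println("  " + region.getName().toStringUtf8()
                + " storefiles=" + region.getStorefiles());
          }
        }
      }
    }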

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,31 +1,31 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message TableInfo {
-  required string name = 1;
-  message Region {
-    required string name = 1;
-    optional bytes startKey = 2;
-    optional bytes endKey = 3;
-    optional int64 id = 4;
-    optional string location = 5;
-  }
-  repeated Region regions = 2;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableInfo {
+  required string name = 1;
+  message Region {
+    required string name = 1;
+    optional bytes startKey = 2;
+    optional bytes endKey = 3;
+    optional int64 id = 4;
+    optional string location = 5;
+  }
+  repeated Region regions = 2;
+}
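
As an illustration of the nested Region message, here is a hypothetical builder
call; the table and server names are invented for the example, and the outer
class name TableInfoMessage is the protoc default, not something this commit
pins down:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo;

    public class TableInfoExample {
      public static TableInfo describe() {
        // name is the only required field at each level.
        return TableInfo.newBuilder()
            .setName("usertable")
            .addRegions(TableInfo.Region.newBuilder()
                .setName("usertable,,1269630000000")            // hypothetical region name
                .setStartKey(ByteString.EMPTY)                  // first region: empty start key
                .setEndKey(ByteString.copyFromUtf8("row-5000"))
                .setLocation("rs1.example.com:60020"))          // hypothetical host:port
            .build();
      }
    }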

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,34 +1,34 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import "ColumnSchemaMessage.proto";
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message TableSchema {
-  optional string name = 1;
-  message Attribute {
-    required string name = 1;
-    required string value = 2;
-  }  
-  repeated Attribute attrs = 2;
-  repeated ColumnSchema columns = 3;
-  // optional helpful encodings of commonly used attributes
-  optional bool inMemory = 4;
-  optional bool readOnly = 5;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "ColumnSchemaMessage.proto";
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableSchema {
+  optional string name = 1;
+  message Attribute {
+    required string name = 1;
+    required string value = 2;
+  }  
+  repeated Attribute attrs = 2;
+  repeated ColumnSchema columns = 3;
+  // optional helpful encodings of commonly used attributes
+  optional bool inMemory = 4;
+  optional bool readOnly = 5;
+}
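
Because this schema imports ColumnSchemaMessage.proto, a builder for it spans
two generated outer classes. The sketch below assumes ColumnSchema exposes a
name field (its definition is not shown in this diff) and uses invented
attribute values:

    import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;

    public class TableSchemaExample {
      public static TableSchema schema() {
        return TableSchema.newBuilder()
            .setName("usertable")
            // Free-form attributes travel as required name/value pairs (field 2).
            .addAttrs(TableSchema.Attribute.newBuilder()
                .setName("MAX_FILESIZE")
                .setValue("268435456"))
            // ColumnSchema comes from the imported ColumnSchemaMessage.proto.
            .addColumns(ColumnSchema.newBuilder().setName("info"))
            // Convenience booleans mirror commonly used attributes (fields 4-5).
            .setInMemory(false)
            .setReadOnly(false)
            .build();
      }
    }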

Modified: hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto (original)
+++ hadoop/hbase/trunk/contrib/stargate/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto Fri Mar 26 19:33:27 2010
@@ -1,27 +1,27 @@
-// Copyright 2010 The Apache Software Foundation
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package org.apache.hadoop.hbase.stargate.protobuf.generated;
-
-message Version {
-  optional string stargateVersion = 1;
-  optional string jvmVersion = 2;
-  optional string osVersion = 3;
-  optional string serverVersion = 4;
-  optional string jerseyVersion = 5;
-}
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Version {
+  optional string stargateVersion = 1;
+  optional string jvmVersion = 2;
+  optional string osVersion = 3;
+  optional string serverVersion = 4;
+  optional string jerseyVersion = 5;
+}
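
A round trip through this message shows the proto2 presence checks that matter
when every field is optional; the version strings below are placeholders:

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;

    public class VersionExample {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        byte[] wire = Version.newBuilder()
            .setStargateVersion("0.0.2")                         // placeholder value
            .setJvmVersion(System.getProperty("java.version"))
            .build()
            .toByteArray();
        Version v = Version.parseFrom(wire);
        // Optional fields get generated hazzers; check before reading.
        if (v.hasStargateVersion()) {
          System.out.println("Stargate " + v.getStargateVersion());
        }
        System.out.println("osVersion set? " + v.hasOsVersion()); // false here
      }
    }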

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java Fri Mar 26 19:33:27 2010
@@ -1,138 +1,138 @@
-/**
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-/**
- * Class that provides static method needed when putting deletes into memstore 
- */
-public class DeleteCompare {
-  
-  /**
-   * Return codes from deleteCompare.
-   */
-  enum DeleteCode {
-    /**
-     * Do nothing.  Move to next KV in memstore
-     */
-    SKIP,
-    
-    /**
-     * Add to the list of deletes.
-     */
-    DELETE,
-    
-    /**
-     * Stop looking at KVs in memstore.  Finalize.
-     */
-    DONE
-  }
-
-  /**
-   * Method used when putting deletes into memstore to remove all the previous
-   * entries that are affected by this Delete
-   * @param mem
-   * @param deleteBuffer
-   * @param deleteRowOffset
-   * @param deleteRowLength
-   * @param deleteQualifierOffset
-   * @param deleteQualifierLength
-   * @param deleteTimeOffset
-   * @param deleteType
-   * @param comparator
-   * @return SKIP if current KeyValue should not be deleted, DELETE if
-   * current KeyValue should be deleted and DONE when the current KeyValue is
-   * out of the Deletes range
-   */
-  public static DeleteCode deleteCompare(KeyValue mem, byte [] deleteBuffer,
-      int deleteRowOffset, short deleteRowLength, int deleteQualifierOffset,
-      int deleteQualifierLength, int deleteTimeOffset, byte deleteType,
-      KeyValue.KeyComparator comparator) {
-
-    //Parsing new KeyValue
-    byte [] memBuffer = mem.getBuffer();
-    int memOffset = mem.getOffset();
-
-    //Getting key lengths
-    int memKeyLen = Bytes.toInt(memBuffer, memOffset);
-    memOffset += Bytes.SIZEOF_INT;
-
-    //Skipping value lengths
-    memOffset += Bytes.SIZEOF_INT;
-
-    //Getting row lengths
-    short memRowLen = Bytes.toShort(memBuffer, memOffset);
-    memOffset += Bytes.SIZEOF_SHORT;
-    int res = comparator.compareRows(memBuffer, memOffset, memRowLen,
-        deleteBuffer, deleteRowOffset, deleteRowLength);
-    if(res > 0) {
-      return DeleteCode.DONE;
-    } else if(res < 0){
-      return DeleteCode.SKIP;
-    }
-
-    memOffset += memRowLen;
-
-    //Getting family lengths
-    byte memFamLen = memBuffer[memOffset];
-    memOffset += Bytes.SIZEOF_BYTE + memFamLen;
-
-    //Get column lengths
-    int memQualifierLen = memKeyLen - memRowLen - memFamLen -
-      Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG -
-      Bytes.SIZEOF_BYTE;
-
-    //Compare timestamp
-    int tsOffset = memOffset + memQualifierLen;
-    int timeRes = Bytes.compareTo(memBuffer, tsOffset, Bytes.SIZEOF_LONG,
-        deleteBuffer, deleteTimeOffset, Bytes.SIZEOF_LONG);
-
-    if (deleteType == KeyValue.Type.DeleteFamily.getCode()) {
-      if (timeRes <= 0) {
-        return DeleteCode.DELETE;
-      }
-      return DeleteCode.SKIP;
-    }
-
-    //Compare columns
-    res = Bytes.compareTo(memBuffer, memOffset, memQualifierLen,
-        deleteBuffer, deleteQualifierOffset, deleteQualifierLength);
-    if (res < 0) {
-      return DeleteCode.SKIP;
-    } else if(res > 0) {
-      return DeleteCode.DONE;
-    }
-    // same column, compare the time.
-    if (timeRes == 0) {
-      return DeleteCode.DELETE;
-    } else if (timeRes < 0) {
-      if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
-        return DeleteCode.DELETE;
-      }
-      return DeleteCode.DONE;
-    } else {
-      return DeleteCode.SKIP;
-    }
-  } 
-}
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * Class that provides a static method needed when putting deletes into the memstore
+ */
+public class DeleteCompare {
+  
+  /**
+   * Return codes from deleteCompare.
+   */
+  enum DeleteCode {
+    /**
+     * Do nothing.  Move to next KV in memstore
+     */
+    SKIP,
+    
+    /**
+     * Add to the list of deletes.
+     */
+    DELETE,
+    
+    /**
+     * Stop looking at KVs in memstore.  Finalize.
+     */
+    DONE
+  }
+
+  /**
+   * Method used when putting deletes into memstore to remove all the previous
+   * entries that are affected by this Delete
+   * @param mem
+   * @param deleteBuffer
+   * @param deleteRowOffset
+   * @param deleteRowLength
+   * @param deleteQualifierOffset
+   * @param deleteQualifierLength
+   * @param deleteTimeOffset
+   * @param deleteType
+   * @param comparator
+   * @return SKIP if the current KeyValue should not be deleted, DELETE if the
+   * current KeyValue should be deleted, and DONE when the current KeyValue is
+   * out of the Delete's range
+   */
+  public static DeleteCode deleteCompare(KeyValue mem, byte [] deleteBuffer,
+      int deleteRowOffset, short deleteRowLength, int deleteQualifierOffset,
+      int deleteQualifierLength, int deleteTimeOffset, byte deleteType,
+      KeyValue.KeyComparator comparator) {
+
+    //Parsing new KeyValue
+    byte [] memBuffer = mem.getBuffer();
+    int memOffset = mem.getOffset();
+
+    //Getting key lengths
+    int memKeyLen = Bytes.toInt(memBuffer, memOffset);
+    memOffset += Bytes.SIZEOF_INT;
+
+    //Skipping value lengths
+    memOffset += Bytes.SIZEOF_INT;
+
+    //Getting row lengths
+    short memRowLen = Bytes.toShort(memBuffer, memOffset);
+    memOffset += Bytes.SIZEOF_SHORT;
+    int res = comparator.compareRows(memBuffer, memOffset, memRowLen,
+        deleteBuffer, deleteRowOffset, deleteRowLength);
+    if(res > 0) {
+      return DeleteCode.DONE;
+    } else if(res < 0){
+      return DeleteCode.SKIP;
+    }
+
+    memOffset += memRowLen;
+
+    //Getting family lengths
+    byte memFamLen = memBuffer[memOffset];
+    memOffset += Bytes.SIZEOF_BYTE + memFamLen;
+
+    //Get column lengths
+    int memQualifierLen = memKeyLen - memRowLen - memFamLen -
+      Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG -
+      Bytes.SIZEOF_BYTE;
+
+    //Compare timestamp
+    int tsOffset = memOffset + memQualifierLen;
+    int timeRes = Bytes.compareTo(memBuffer, tsOffset, Bytes.SIZEOF_LONG,
+        deleteBuffer, deleteTimeOffset, Bytes.SIZEOF_LONG);
+
+    if (deleteType == KeyValue.Type.DeleteFamily.getCode()) {
+      if (timeRes <= 0) {
+        return DeleteCode.DELETE;
+      }
+      return DeleteCode.SKIP;
+    }
+
+    //Compare columns
+    res = Bytes.compareTo(memBuffer, memOffset, memQualifierLen,
+        deleteBuffer, deleteQualifierOffset, deleteQualifierLength);
+    if (res < 0) {
+      return DeleteCode.SKIP;
+    } else if(res > 0) {
+      return DeleteCode.DONE;
+    }
+    // same column, compare the time.
+    if (timeRes == 0) {
+      return DeleteCode.DELETE;
+    } else if (timeRes < 0) {
+      if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
+        return DeleteCode.DELETE;
+      }
+      return DeleteCode.DONE;
+    } else {
+      return DeleteCode.SKIP;
+    }
+  } 
+}
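
To make the three DeleteCode outcomes concrete, here is an illustrative sweep
loop (not the actual MemStore caller). It lives in the same package because
DeleteCode is package-private, and it assumes the delete's row, qualifier, and
timestamp offsets were already parsed out of its buffer:

    package org.apache.hadoop.hbase.regionserver;

    import java.util.ArrayList;
    import java.util.List;
    import java.util.SortedSet;

    import org.apache.hadoop.hbase.KeyValue;

    public class DeleteSweepSketch {
      /** Collect the memstore entries covered by one Delete. */
      static List<KeyValue> collectCovered(SortedSet<KeyValue> tail,
          byte[] deleteBuffer, int rowOff, short rowLen, int qualOff,
          int qualLen, int tsOff, byte type,
          KeyValue.KeyComparator comparator) {
        List<KeyValue> covered = new ArrayList<KeyValue>();
        for (KeyValue mem : tail) {
          DeleteCompare.DeleteCode code = DeleteCompare.deleteCompare(mem,
              deleteBuffer, rowOff, rowLen, qualOff, qualLen, tsOff, type,
              comparator);
          if (code == DeleteCompare.DeleteCode.DELETE) {
            covered.add(mem);    // affected by this Delete
          } else if (code == DeleteCompare.DeleteCode.DONE) {
            break;               // sorted order: nothing later can match
          }                      // SKIP: keep scanning
        }
        return covered;
      }
    }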

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java?rev=928031&r1=928030&r2=928031&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java Fri Mar 26 19:33:27 2010
@@ -1,97 +1,97 @@
-/**
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-/**
- * This interface is used for the tracking and enforcement of Deletes
- * during the course of a Get or Scan operation.
- * <p>
- * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
- * <li>{@link #update} when reaching the end of a StoreFile 
- */
-public interface DeleteTracker {
-  
-  /**
-   * Add the specified KeyValue to the list of deletes to check against for
-   * this row operation.
-   * <p>
-   * This is called when a Delete is encountered in a StoreFile.
-   * @param buffer KeyValue buffer
-   * @param qualifierOffset column qualifier offset
-   * @param qualifierLength column qualifier length
-   * @param timestamp timestamp
-   * @param type delete type as byte
-   */
-  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
-      long timestamp, byte type);
-  
-  /**
-   * Check if the specified KeyValue buffer has been deleted by a previously
-   * seen delete.
-   * @param buffer KeyValue buffer
-   * @param qualifierOffset column qualifier offset
-   * @param qualifierLength column qualifier length
-   * @param timestamp timestamp
-   * @return true is the specified KeyValue is deleted, false if not
-   */
-  public boolean isDeleted(byte [] buffer, int qualifierOffset,
-      int qualifierLength, long timestamp);
-  
-  /**
-   * @return true if there are no current delete, false otherwise
-   */
-  public boolean isEmpty();
-  
-  /**
-   * Called at the end of every StoreFile.
-   * <p>
-   * Many optimized implementations of Trackers will require an update at
-   * when the end of each StoreFile is reached.
-   */
-  public void update();
-  
-  /**
-   * Called between rows.
-   * <p>
-   * This clears everything as if a new DeleteTracker was instantiated.
-   */
-  public void reset();
-  
-
-  /**
-   * Return codes for comparison of two Deletes.
-   * <p>
-   * The codes tell the merging function what to do.
-   * <p>
-   * INCLUDE means add the specified Delete to the merged list.
-   * NEXT means move to the next element in the specified list(s).
-   */
-  enum DeleteCompare { 
-    INCLUDE_OLD_NEXT_OLD,
-    INCLUDE_OLD_NEXT_BOTH,
-    INCLUDE_NEW_NEXT_NEW,
-    INCLUDE_NEW_NEXT_BOTH,
-    NEXT_OLD,
-    NEXT_NEW
-  }
-  
-}
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * This interface is used for the tracking and enforcement of Deletes
+ * during the course of a Get or Scan operation.
+ * <p>
+ * This interface is utilized through three methods:
+ * <ul><li>{@link #add} when encountering a Delete
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
+ * <li>{@link #update} when reaching the end of a StoreFile</ul>
+ */
+public interface DeleteTracker {
+  
+  /**
+   * Add the specified KeyValue to the list of deletes to check against for
+   * this row operation.
+   * <p>
+   * This is called when a Delete is encountered in a StoreFile.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @param type delete type as byte
+   */
+  public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
+      long timestamp, byte type);
+  
+  /**
+   * Check if the specified KeyValue buffer has been deleted by a previously
+   * seen delete.
+   * @param buffer KeyValue buffer
+   * @param qualifierOffset column qualifier offset
+   * @param qualifierLength column qualifier length
+   * @param timestamp timestamp
+   * @return true if the specified KeyValue is deleted, false if not
+   */
+  public boolean isDeleted(byte [] buffer, int qualifierOffset,
+      int qualifierLength, long timestamp);
+  
+  /**
+   * @return true if there are no current deletes, false otherwise
+   */
+  public boolean isEmpty();
+  
+  /**
+   * Called at the end of every StoreFile.
+   * <p>
+   * Many optimized implementations of Trackers will require an update
+   * when the end of each StoreFile is reached.
+   */
+  public void update();
+  
+  /**
+   * Called between rows.
+   * <p>
+   * This clears everything as if a new DeleteTracker was instantiated.
+   */
+  public void reset();
+  
+
+  /**
+   * Return codes for comparison of two Deletes.
+   * <p>
+   * The codes tell the merging function what to do.
+   * <p>
+   * INCLUDE means add the specified Delete to the merged list.
+   * NEXT means move to the next element in the specified list(s).
+   */
+  enum DeleteCompare { 
+    INCLUDE_OLD_NEXT_OLD,
+    INCLUDE_OLD_NEXT_BOTH,
+    INCLUDE_NEW_NEXT_NEW,
+    INCLUDE_NEW_NEXT_BOTH,
+    NEXT_OLD,
+    NEXT_NEW
+  }
+  
+}
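
The three-method contract above implies a driver loop along these lines; the
tracker calls and KeyValue accessors are from this codebase, while the scan
plumbing and emit sink are invented for the sketch:

    package org.apache.hadoop.hbase.regionserver;

    import org.apache.hadoop.hbase.KeyValue;

    public class DeleteTrackerSketch {
      /** Feed one StoreFile's KeyValues for a single row through a tracker. */
      static void scanStoreFile(DeleteTracker tracker, Iterable<KeyValue> kvs) {
        for (KeyValue kv : kvs) {
          byte[] buf = kv.getBuffer();
          if (kv.getType() != KeyValue.Type.Put.getCode()) {
            // A delete: remember it for later Puts in this row.
            tracker.add(buf, kv.getQualifierOffset(), kv.getQualifierLength(),
                kv.getTimestamp(), kv.getType());
          } else if (!tracker.isDeleted(buf, kv.getQualifierOffset(),
              kv.getQualifierLength(), kv.getTimestamp())) {
            emit(kv);  // hypothetical downstream consumer
          }
        }
        tracker.update();  // per-StoreFile bookkeeping, as documented above
      }

      static void emit(KeyValue kv) { /* placeholder sink */ }
    }

Between rows the caller would invoke tracker.reset(), restoring the tracker to
a freshly constructed state as the reset() comment specifies.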


