hadoop-common-commits mailing list archives

From: d...@apache.org
Subject: svn commit: r612739 [3/3] - in /lucene/hadoop/trunk: ./ src/c++/librecordio/ src/c++/librecordio/test/ src/java/org/apache/hadoop/record/compiler/ src/java/org/apache/hadoop/record/meta/ src/test/ddl/ src/test/org/apache/hadoop/record/
Date: Thu, 17 Jan 2008 07:02:10 GMT
Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/StructTypeID.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/StructTypeID.java?rev=612739&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/StructTypeID.java (added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/StructTypeID.java Wed Jan 16 23:02:07 2008
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record.meta;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.apache.hadoop.record.RecordInput;
+import org.apache.hadoop.record.RecordOutput;
+
+/** 
+ * Represents typeID for a struct 
+ */
+public class StructTypeID extends TypeID {
+  private ArrayList<FieldTypeInfo> typeInfos = new ArrayList<FieldTypeInfo>();
+  
+  StructTypeID() {
+    super(RIOType.STRUCT);
+  }
+  
+  /**
+   * Create a StructTypeID based on the RecordTypeInfo of some record
+   */
+  public StructTypeID(RecordTypeInfo rti) {
+    super(RIOType.STRUCT);
+    typeInfos.addAll(rti.getFieldTypeInfos());
+  }
+
+  void add (FieldTypeInfo ti) {
+    typeInfos.add(ti);
+  }
+  
+  public Collection<FieldTypeInfo> getFieldTypeInfos() {
+    return typeInfos;
+  }
+  
+  /* 
+   * Return the StructTypeID, if any, of the given field.
+   */
+  StructTypeID findStruct(String name) {
+    // Walk through the list, searching. Not the most efficient way, but this
+    // is intended to be used rarely, so we keep it simple.
+    // As an optimization, we could later keep a hashmap from record name to its RTI.
+    for (FieldTypeInfo ti : typeInfos) {
+      if ((0 == ti.getFieldID().compareTo(name)) && (ti.getTypeID().getTypeVal() == RIOType.STRUCT)) {
+        return (StructTypeID) ti.getTypeID();
+      }
+    }
+    return null;
+  }
+  
+  void write(RecordOutput rout, String tag) throws IOException {
+    rout.writeByte(typeVal, tag);
+    writeRest(rout, tag);
+  }
+
+  /* 
+   * Writes rest of the struct (excluding type value).
+   * As an optimization, this method is directly called by RTI 
+   * for the top level record so that we don't write out the byte
+   * indicating that this is a struct (since top level records are
+   * always structs).
+   */
+  void writeRest(RecordOutput rout, String tag) throws IOException {
+    rout.writeInt(typeInfos.size(), tag);
+    for (FieldTypeInfo ti : typeInfos) {
+      ti.write(rout, tag);
+    }
+  }
+
+  /* 
+   * deserialize ourselves. Called by RTI. 
+   */
+  void read(RecordInput rin, String tag) throws IOException {
+    // number of elements
+    int numElems = rin.readInt(tag);
+    for (int i=0; i<numElems; i++) {
+      typeInfos.add(genericReadTypeInfo(rin, tag));
+    }
+  }
+  
+  // generic reader: reads the next TypeInfo object from stream and returns it
+  private FieldTypeInfo genericReadTypeInfo(RecordInput rin, String tag) throws IOException {
+    String fieldName = rin.readString(tag);
+    TypeID id = genericReadTypeID(rin, tag);
+    return new FieldTypeInfo(fieldName, id);
+  }
+  
+  // generic reader: reads the next TypeID object from stream and returns it
+  private TypeID genericReadTypeID(RecordInput rin, String tag) throws IOException {
+    byte typeVal = rin.readByte(tag);
+    switch (typeVal) {
+    case TypeID.RIOType.BOOL: 
+      return TypeID.BoolTypeID;
+    case TypeID.RIOType.BUFFER: 
+      return TypeID.BufferTypeID;
+    case TypeID.RIOType.BYTE:
+      return TypeID.ByteTypeID;
+    case TypeID.RIOType.DOUBLE:
+      return TypeID.DoubleTypeID;
+    case TypeID.RIOType.FLOAT:
+      return TypeID.FloatTypeID;
+    case TypeID.RIOType.INT: 
+      return TypeID.IntTypeID;
+    case TypeID.RIOType.LONG:
+      return TypeID.LongTypeID;
+    case TypeID.RIOType.MAP:
+    {
+      TypeID tIDKey = genericReadTypeID(rin, tag);
+      TypeID tIDValue = genericReadTypeID(rin, tag);
+      return new MapTypeID(tIDKey, tIDValue);
+    }
+    case TypeID.RIOType.STRING: 
+      return TypeID.StringTypeID;
+    case TypeID.RIOType.STRUCT: 
+    {
+      StructTypeID stID = new StructTypeID();
+      int numElems = rin.readInt(tag);
+      for (int i=0; i<numElems; i++) {
+        stID.add(genericReadTypeInfo(rin, tag));
+      }
+      return stID;
+    }
+    case TypeID.RIOType.VECTOR: 
+    {
+      TypeID tID = genericReadTypeID(rin, tag);
+      return new VectorTypeID(tID);
+    }
+    default:
+      // shouldn't be here
+      throw new IOException("Unknown type read");
+    }
+  }
+
+}
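
For orientation, a minimal sketch of the layout this class implies. It assumes the RecordTypeInfo class added elsewhere in this commit exposes a public addField(String, TypeID) method; the record and field names here are invented for illustration.

    // Hypothetical illustration -- "Employee", "id" and "name" are made up.
    RecordTypeInfo rti = new RecordTypeInfo("Employee");
    rti.addField("id", TypeID.IntTypeID);       // serialized as: field name, then byte 6 (INT)
    rti.addField("name", TypeID.StringTypeID);  // serialized as: field name, then byte 9 (STRING)
    StructTypeID stID = new StructTypeID(rti);
    // write() emits: byte 10 (STRUCT), int 2 (the field count), then each
    // (field name, TypeID) pair in order; genericReadTypeID() is the mirror
    // image, recursing into MAP, VECTOR and nested STRUCT entries.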

Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/TypeID.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/TypeID.java?rev=612739&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/TypeID.java (added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/TypeID.java Wed Jan 16 23:02:07 2008
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record.meta;
+
+import java.io.IOException;
+import org.apache.hadoop.record.RecordOutput;
+
+/** 
+ * Represents typeID for basic types. 
+ */
+public class TypeID {
+
+  /**
+   * constants representing the IDL types we support
+   */
+  public static final class RIOType {
+    public static final byte BOOL   = 1;
+    public static final byte BUFFER = 2;
+    public static final byte BYTE   = 3;
+    public static final byte DOUBLE = 4;
+    public static final byte FLOAT  = 5;
+    public static final byte INT    = 6;
+    public static final byte LONG   = 7;
+    public static final byte MAP    = 8;
+    public static final byte STRING = 9;
+    public static final byte STRUCT = 10;
+    public static final byte VECTOR = 11;
+  }
+
+  /**
+   * Constant classes for the basic types, so we can share them.
+   */
+  public static final TypeID BoolTypeID = new TypeID(RIOType.BOOL);
+  public static final TypeID BufferTypeID = new TypeID(RIOType.BUFFER);
+  public static final TypeID ByteTypeID = new TypeID(RIOType.BYTE);
+  public static final TypeID DoubleTypeID = new TypeID(RIOType.DOUBLE);
+  public static final TypeID FloatTypeID = new TypeID(RIOType.FLOAT);
+  public static final TypeID IntTypeID = new TypeID(RIOType.INT);
+  public static final TypeID LongTypeID = new TypeID(RIOType.LONG);
+  public static final TypeID StringTypeID = new TypeID(RIOType.STRING);
+  
+  protected byte typeVal;
+
+  /**
+   * Create a TypeID object 
+   */
+  TypeID(byte typeVal) {
+    this.typeVal = typeVal;
+  }
+
+  /**
+   * Get the type value. One of the constants in RIOType.
+   */
+  public byte getTypeVal() {
+    return typeVal;
+  }
+
+  /**
+   * Serialize the TypeID object
+   */
+  void write(RecordOutput rout, String tag) throws IOException {
+    rout.writeByte(typeVal, tag);
+  }
+  
+  /**
+   * Two base typeIDs are equal if they refer to the same type
+   */
+  public boolean equals(Object o) {
+    if (this == o) 
+      return true;
+    if (!(o instanceof TypeID))
+      return false;
+    TypeID oTypeID = (TypeID) o;
+    return (this.typeVal == oTypeID.typeVal);
+  }
+  
+  /**
+   * We use a basic hashcode implementation, since this class will likely not
+   * be used as a hashmap key 
+   */
+  public int hashCode() {
+    // See 'Effective Java' by Joshua Bloch
+    return 37*17+(int)typeVal;
+  }
+}
+
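
The basic-type singletons above compare by typeVal rather than by object identity, so equality behaves sensibly across independently deserialized type trees. A small sketch (VectorTypeID is added later in this commit):

    TypeID a = TypeID.IntTypeID;
    TypeID b = new VectorTypeID(TypeID.IntTypeID).getElementTypeID();
    assert a.equals(b);                           // both carry RIOType.INT
    assert a.getTypeVal() == TypeID.RIOType.INT;  // typeVal is the wire byte (6)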

Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/Utils.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/Utils.java?rev=612739&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/Utils.java (added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/Utils.java Wed Jan 16 23:02:07 2008
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record.meta;
+
+import java.io.IOException;
+import java.util.Iterator;
+import org.apache.hadoop.record.RecordInput;
+
+/**
+ * Various utility functions for the Hadoop record I/O platform.
+ */
+public class Utils {
+  
+  /** Cannot create a new instance of Utils */
+  private Utils() {
+  }
+  
+  /**
+   * Read past (skip) one serialized value of the given type from the stream.
+   */
+  public static void skip(RecordInput rin, String tag, TypeID typeID) throws IOException {
+    switch (typeID.typeVal) {
+    case TypeID.RIOType.BOOL: 
+      rin.readBool(tag);
+      break;
+    case TypeID.RIOType.BUFFER: 
+      rin.readBuffer(tag);
+      break;
+    case TypeID.RIOType.BYTE: 
+      rin.readByte(tag);
+      break;
+    case TypeID.RIOType.DOUBLE: 
+      rin.readDouble(tag);
+      break;
+    case TypeID.RIOType.FLOAT: 
+      rin.readFloat(tag);
+      break;
+    case TypeID.RIOType.INT: 
+      rin.readInt(tag);
+      break;
+    case TypeID.RIOType.LONG: 
+      rin.readLong(tag);
+      break;
+    case TypeID.RIOType.MAP: 
+      org.apache.hadoop.record.Index midx1 = rin.startMap(tag);
+      MapTypeID mtID = (MapTypeID) typeID;
+      for (; !midx1.done(); midx1.incr()) {
+        skip(rin, tag, mtID.getKeyTypeID());
+        skip(rin, tag, mtID.getValueTypeID());
+      }
+      rin.endMap(tag);
+      break;
+    case TypeID.RIOType.STRING: 
+      rin.readString(tag);
+      break;
+    case TypeID.RIOType.STRUCT:
+      rin.startRecord(tag);
+      // read past each field in the struct
+      StructTypeID stID = (StructTypeID) typeID;
+      Iterator<FieldTypeInfo> it = stID.getFieldTypeInfos().iterator();
+      while (it.hasNext()) {
+        FieldTypeInfo tInfo = it.next();
+        skip(rin, tag, tInfo.getTypeID());
+      }
+      rin.endRecord(tag);
+      break;
+    case TypeID.RIOType.VECTOR: 
+      org.apache.hadoop.record.Index vidx1 = rin.startVector(tag);
+      VectorTypeID vtID = (VectorTypeID) typeID;
+      for (; !vidx1.done(); vidx1.incr()) {
+        skip(rin, tag, vtID.getElementTypeID());
+      }
+      rin.endVector(tag);
+      break;
+    default: 
+      // shouldn't be here
+      throw new IOException("Unknown typeID when skipping bytes");
+    }
+  }
+}
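
The purpose of skip() is versioned deserialization: when a reader meets a field its own record type no longer has, it can consume the bytes knowing only the writer's TypeID. A rough sketch of how a generated deserializer would use it (rin is the deserializer's RecordInput; the field name is illustrative):

    // For each field named in the writer's type info:
    //   known to the reader   -> read it normally
    //   unknown to the reader -> discard it:
    Utils.skip(rin, "someDroppedField", TypeID.LongTypeID);  // consumes one long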

Added: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/VectorTypeID.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/VectorTypeID.java?rev=612739&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/VectorTypeID.java (added)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/meta/VectorTypeID.java Wed Jan 16 23:02:07 2008
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record.meta;
+
+import java.io.IOException;
+
+import org.apache.hadoop.record.RecordOutput;
+
+/** 
+ * Represents typeID for vector. 
+ */
+public class VectorTypeID extends TypeID {
+  private TypeID typeIDElement; 
+  
+  public VectorTypeID(TypeID typeIDElement) {
+    super(RIOType.VECTOR);
+    this.typeIDElement = typeIDElement;
+  }
+  
+  public TypeID getElementTypeID() {
+    return this.typeIDElement;
+  }
+  
+  void write(RecordOutput rout, String tag) throws IOException {
+    rout.writeByte(typeVal, tag);
+    typeIDElement.write(rout, tag);
+  }
+  
+  /**
+   * Two vector typeIDs are equal if their constituent elements have the 
+   * same type
+   */
+  public boolean equals(Object o) {
+    if (this == o) 
+      return true;
+    if (!(o instanceof VectorTypeID))
+      return false;
+    VectorTypeID vti = (VectorTypeID) o;
+    return this.typeIDElement.equals(vti.typeIDElement);
+  }
+  
+  /**
+   * We use a basic hashcode implementation, since this class will likely not
+   * be used as a hashmap key 
+   */
+  public int hashCode() {
+    return 37*17+typeIDElement.hashCode();
+  }
+  
+}
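
Because equals() recurses into the element TypeID, structurally identical composite types compare equal even when built independently (assuming MapTypeID, added in this commit, likewise compares its key and value TypeIDs):

    TypeID t1 = new VectorTypeID(new MapTypeID(TypeID.IntTypeID, TypeID.LongTypeID));
    TypeID t2 = new VectorTypeID(new MapTypeID(TypeID.IntTypeID, TypeID.LongTypeID));
    assert t1.equals(t2);  // element-wise recursion: VECTOR -> MAP -> (INT, LONG)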

Modified: lucene/hadoop/trunk/src/test/ddl/test.jr
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/ddl/test.jr?rev=612739&r1=612738&r2=612739&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/ddl/test.jr (original)
+++ lucene/hadoop/trunk/src/test/ddl/test.jr Wed Jan 16 23:02:07 2008
@@ -17,5 +17,30 @@
         map<ustring, ustring>   mapVal;
         RecRecord0      recordVal;
     }
+
+    class RecRecordOld {
+        ustring name;
+        vector<long> ivec;
+        vector<vector<RecRecord0>> svec;
+        RecRecord0 inner;
+        vector<vector<vector<ustring>>> strvec;
+        float i1;
+        map<byte, ustring> map1;
+        vector<map<int, long>> mvec1;
+        vector<map<int, long>> mvec2;
+    }
+
+    /* RecRecordNew is a lot like RecRecordOld. Helps test for versioning. */
+    class RecRecordNew {
+        ustring name2;
+        RecRecord0 inner;
+        vector<int> ivec;
+        vector<vector<int>> svec;
+        vector<vector<vector<ustring>>> strvec;
+        int i1;
+        map<long, ustring> map1;
+        vector<map<int, long>> mvec2;
+    }
+
 }
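
The two DDL records above drive the versioning scenario: data written as RecRecordOld is read back as RecRecordNew. The reader-side handshake, using exactly the APIs exercised by the test that follows (in and inRTI are the test's BinaryRecordInput streams):

    RecordTypeInfo rti = new RecordTypeInfo();
    rti.deserialize(inRTI);            // the writer's (old) type information
    RecRecordNew.setTypeFilter(rti);   // tell the generated class what was written
    RecRecordNew rec = new RecRecordNew();
    rec.deserialize(in);               // old-only fields are skipped; fields
                                       // with no matching type read back null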
 

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordVersioning.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordVersioning.java?rev=612739&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordVersioning.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordVersioning.java Wed Jan 16 23:02:07 2008
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.record;
+
+import java.io.IOException;
+import junit.framework.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+import java.util.TreeMap;
+import org.apache.hadoop.record.meta.RecordTypeInfo;
+
+/**
+ */
+public class TestRecordVersioning extends TestCase {
+    
+  public TestRecordVersioning(String testName) {
+    super(testName);
+  }
+
+  protected void setUp() throws Exception {
+  }
+
+  protected void tearDown() throws Exception {
+  }
+    
+  /* 
+   * basic versioning
+   * write out a record and its type info, read it back using its typeinfo
+   */
+  public void testBasic() {
+    File tmpfile, tmpRTIfile;
+    try {
+      tmpfile = File.createTempFile("hadooprec", ".dat");
+      tmpRTIfile = File.createTempFile("hadooprti", ".dat");
+      FileOutputStream ostream = new FileOutputStream(tmpfile);
+      BinaryRecordOutput out = new BinaryRecordOutput(ostream);
+      FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
+      BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
+      RecRecord1 r1 = new RecRecord1();
+      r1.setBoolVal(true);
+      r1.setByteVal((byte)0x66);
+      r1.setFloatVal(3.145F);
+      r1.setDoubleVal(1.5234);
+      r1.setIntVal(-4567);
+      r1.setLongVal(-2367L);
+      r1.setStringVal("random text");
+      r1.setBufferVal(new Buffer());
+      r1.setVectorVal(new ArrayList<String>());
+      r1.setMapVal(new TreeMap<String,String>());
+      RecRecord0 r0 = new RecRecord0();
+      r0.setStringVal("other random text");
+      r1.setRecordVal(r0);
+      r1.serialize(out, "");
+      ostream.close();
+      // write out the type info
+      RecRecord1.getTypeInfo().serialize(outRTI);
+      oRTIstream.close();
+      
+      // read
+      FileInputStream istream = new FileInputStream(tmpfile);
+      BinaryRecordInput in = new BinaryRecordInput(istream);
+      FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
+      BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
+      RecordTypeInfo rti = new RecordTypeInfo();
+      rti.deserialize(inRTI);
+      iRTIstream.close();
+      RecRecord1.setTypeFilter(rti);
+      RecRecord1 r2 = new RecRecord1();
+      r2.deserialize(in, "");
+      istream.close();
+      tmpfile.delete();
+      tmpRTIfile.delete();
+      assertTrue("Serialized and deserialized versioned records do not match.", r1.equals(r2));
+    } catch (IOException ex) {
+      ex.printStackTrace();
+    } 
+  }
+    
+  /* 
+   * versioning
+   * write out a record and its type info, read back a similar record using the written record's typeinfo
+   */
+  public void testVersioning() {
+    File tmpfile, tmpRTIfile;
+    try {
+      tmpfile = File.createTempFile("hadooprec", ".dat");
+      tmpRTIfile = File.createTempFile("hadooprti", ".dat");
+      FileOutputStream ostream = new FileOutputStream(tmpfile);
+      BinaryRecordOutput out = new BinaryRecordOutput(ostream);
+      FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
+      BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
+
+      // we create an array of records to write
+      ArrayList<RecRecordOld> recsWrite = new ArrayList<RecRecordOld>();
+      int i, j, k, l;
+      for (i=0; i<5; i++) {
+        RecRecordOld s1Rec = new RecRecordOld();
+
+        s1Rec.setName("This is record s1: " + i);
+
+        ArrayList<Long> iA = new ArrayList<Long>();
+        for (j=0; j<3; j++) {
+          iA.add(new Long(i+j));
+        }
+        s1Rec.setIvec(iA);
+
+        ArrayList<ArrayList<RecRecord0>> ssVec = new ArrayList<ArrayList<RecRecord0>>();
+        for (j=0; j<2; j++) {
+          ArrayList<RecRecord0> sVec = new ArrayList<RecRecord0>();
+          for (k=0; k<3; k++) {
+            RecRecord0 sRec = new RecRecord0("This is record s: ("+j+": "+k+")");
+            sVec.add(sRec);
+          }
+          ssVec.add(sVec);
+        }
+        s1Rec.setSvec(ssVec);
+
+        s1Rec.setInner(new RecRecord0("This is record s: " + i));
+
+        ArrayList<ArrayList<ArrayList<String>>> aaaVec = new ArrayList<ArrayList<ArrayList<String>>>();
+        for (l=0; l<2; l++) {
+          ArrayList<ArrayList<String>> aaVec = new ArrayList<ArrayList<String>>();
+          for (j=0; j<2; j++) {
+            ArrayList<String> aVec = new ArrayList<String>();
+            for (k=0; k<3; k++) {
+              aVec.add(new String("This is a nested string: (" + l + ": " + j + ": " + k + ")"));
+            }
+            aaVec.add(aVec);
+          }
+          aaaVec.add(aaVec);
+        }
+        s1Rec.setStrvec(aaaVec);
+
+        s1Rec.setI1(100+i);
+
+        java.util.TreeMap<Byte,String> map1 = new java.util.TreeMap<Byte,String>();
+        map1.put(new Byte("23"), "23");
+        map1.put(new Byte("11"), "11");
+        s1Rec.setMap1(map1);
+
+        java.util.TreeMap<Integer,Long> m1 = new java.util.TreeMap<Integer,Long>();
+        java.util.TreeMap<Integer,Long> m2 = new java.util.TreeMap<Integer,Long>();
+        m1.put(new Integer(5), 5L);
+        m1.put(new Integer(10), 10L);
+        m2.put(new Integer(15), 15L);
+        m2.put(new Integer(20), 20L);
+        java.util.ArrayList<java.util.TreeMap<Integer,Long>> vm1 = new java.util.ArrayList<java.util.TreeMap<Integer,Long>>();
+        vm1.add(m1);
+        vm1.add(m2);
+        s1Rec.setMvec1(vm1);
+        java.util.ArrayList<java.util.TreeMap<Integer,Long>> vm2 = new java.util.ArrayList<java.util.TreeMap<Integer,Long>>();
+        vm2.add(m1);
+        s1Rec.setMvec2(vm2);
+
+        // add to our list
+        recsWrite.add(s1Rec);
+      }
+
+      // write out to file
+      for (RecRecordOld rec: recsWrite) {
+        rec.serialize(out);
+      }
+      ostream.close();
+      // write out the type info
+      RecRecordOld.getTypeInfo().serialize(outRTI);
+      oRTIstream.close();
+
+      // read
+      FileInputStream istream = new FileInputStream(tmpfile);
+      BinaryRecordInput in = new BinaryRecordInput(istream);
+      FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
+      BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
+      RecordTypeInfo rti = new RecordTypeInfo();
+
+      // read type info
+      rti.deserialize(inRTI);
+      iRTIstream.close();
+      RecRecordNew.setTypeFilter(rti);
+
+      // read records
+      ArrayList<RecRecordNew> recsRead = new ArrayList<RecRecordNew>();
+      for (i=0; i<recsWrite.size(); i++) {
+        RecRecordNew s2Rec = new RecRecordNew();
+        s2Rec.deserialize(in);
+        recsRead.add(s2Rec);
+      }
+      istream.close();
+      tmpfile.delete();
+      tmpRTIfile.delete();
+
+      // compare
+      for (i=0; i<recsRead.size(); i++) {
+        RecRecordOld s1Out = recsWrite.get(i);
+        RecRecordNew s2In = recsRead.get(i);
+        assertTrue("Incorrectly read name2 field", null == s2In.getName2());
+        assertTrue("Error comparing inner fields", (0 == s1Out.getInner().compareTo(s2In.getInner())));
+        assertTrue("Incorrectly read ivec field", null == s2In.getIvec());
+        assertTrue("Incorrectly read svec field", null == s2In.getSvec());
+        for (j=0; j<s2In.getStrvec().size(); j++) {
+          ArrayList<ArrayList<String>> ss2Vec = s2In.getStrvec().get(j);
+          ArrayList<ArrayList<String>> ss1Vec = s1Out.getStrvec().get(j);
+          for (k=0; k<ss2Vec.size(); k++) {
+            ArrayList<String> s2Vec = ss2Vec.get(k);
+            ArrayList<String> s1Vec = ss1Vec.get(k);
+            for (l=0; l<s2Vec.size(); l++) {
+              assertTrue("Error comparing strVec fields", (0 == s2Vec.get(l).compareTo(s1Vec.get(l))));
+            }
+          }
+        }
+        assertTrue("Incorrectly read map1 field", null == s2In.getMap1());
+        for (j=0; j<s2In.getMvec2().size(); j++) {
+          assertTrue("Error comparing mvec2 fields", (s2In.getMvec2().get(j).equals(s1Out.getMvec2().get(j))));
+        }
+      }
+
+    } catch (IOException ex) {
+      ex.printStackTrace();
+    } 
+  }
+
+}
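
To summarize what the assertions above establish about the old-to-new mapping (the reasons follow from the DDL diff earlier in this commit):

    // name  (old only)                          -> dropped; name2 (new only) is null
    // ivec: vector<long> vs vector<int>         -> type mismatch, reads back null
    // svec: vector<vector<RecRecord0>> vs
    //       vector<vector<int>>                 -> type mismatch, reads back null
    // inner, strvec: identical types            -> preserved, compared for equality
    // map1: map<byte,...> vs map<long,...>      -> key type mismatch, reads back null
    // mvec1 (old only) skipped; mvec2 (same type) -> preserved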


