hadoop-mapreduce-commits mailing list archives

From s..@apache.org
Subject svn commit: r946832 [2/3] - in /hadoop/mapreduce/trunk: ./ src/test/mapred/org/apache/hadoop/fs/slive/
Date Fri, 21 May 2010 00:02:13 GMT
Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java Fri May 21 00:02:12 2010
@@ -0,0 +1,401 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.DataInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * Class which reads in and verifies bytes that have been read in
+ */
+class DataVerifier {
+  private static final int BYTES_PER_LONG = Constants.BYTES_PER_LONG;
+
+  private int bufferSize;
+
+  /**
+   * The output from verification includes the number of chunks that matched
+   * the expected data, the number of chunks that differed from what was
+   * expected, the total number of bytes read and the time spent reading
+   */
+  static class VerifyOutput {
+    private long same;
+    private long different;
+    private long read;
+    private long readTime;
+
+    VerifyOutput(long sameChunks, long differentChunks, long readBytes,
+        long readTime) {
+      this.same = sameChunks;
+      this.different = differentChunks;
+      this.read = readBytes;
+      this.readTime = readTime;
+    }
+
+    long getReadTime() {
+      return this.readTime;
+    }
+
+    long getBytesRead() {
+      return this.read;
+    }
+
+    long getChunksSame() {
+      return same;
+    }
+
+    long getChunksDifferent() {
+      return different;
+    }
+
+    public String toString() {
+      return "Bytes read = " + getBytesRead() + " same = " + getChunksSame()
+          + " different = " + getChunksDifferent() + " in " + getReadTime()
+          + " milliseconds";
+    }
+
+  }
+
+  /**
+   * Class used to hold the result of a read on a header
+   */
+  private static class ReadInfo {
+    private long byteAm;
+    private long hash;
+    private long timeTaken;
+    private long bytesRead;
+
+    ReadInfo(long byteAm, long hash, long timeTaken, long bytesRead) {
+      this.byteAm = byteAm;
+      this.hash = hash;
+      this.timeTaken = timeTaken;
+      this.bytesRead = bytesRead;
+    }
+
+    long getByteAm() {
+      return byteAm;
+    }
+
+    long getHashValue() {
+      return hash;
+    }
+
+    long getTimeTaken() {
+      return timeTaken;
+    }
+
+    long getBytesRead() {
+      return bytesRead;
+    }
+
+  }
+
+  /**
+   * Storage class used to hold the number of chunks that were the same and
+   * different for a buffered read and its resulting verification
+   */
+  private static class VerifyInfo {
+
+    VerifyInfo(long same, long different) {
+      this.same = same;
+      this.different = different;
+    }
+
+    long getSame() {
+      return same;
+    }
+
+    long getDifferent() {
+      return different;
+    }
+
+    private long same;
+    private long different;
+  }
+
+  /**
+   * Inits with given buffer size (must be at least bytes per long and a
+   * multiple of bytes per long)
+   * 
+   * @param bufferSize
+   *          size which must be at least BYTES_PER_LONG and which also must
+   *          be a multiple of BYTES_PER_LONG
+   */
+  DataVerifier(int bufferSize) {
+    if (bufferSize < BYTES_PER_LONG) {
+      throw new IllegalArgumentException(
+          "Buffer size must be greater than or equal to " + BYTES_PER_LONG);
+    }
+    if ((bufferSize % BYTES_PER_LONG) != 0) {
+      throw new IllegalArgumentException("Buffer size must be a multiple of "
+          + BYTES_PER_LONG);
+    }
+    this.bufferSize = bufferSize;
+  }
+
+  /**
+   * Inits with the default buffer size
+   */
+  DataVerifier() {
+    this(Constants.BUFFERSIZE);
+  }
+
+  /**
+   * Verifies a buffer of a given size using the given start hash offset
+   * 
+   * @param buf
+   *          the buffer to verify
+   * @param size
+   *          the number of bytes to be used in that buffer
+   * @param startOffset
+   *          the start hash offset
+   * @param hasher
+   *          the hasher to use for calculating expected values
+   * 
+   * @return VerifyInfo containing the number of chunks that matched and differed
+   */
+  private VerifyInfo verifyBuffer(ByteBuffer buf, int size, long startOffset,
+      DataHasher hasher) {
+    ByteBuffer cmpBuf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
+    long hashOffset = startOffset;
+    long chunksSame = 0;
+    long chunksDifferent = 0;
+    for (long i = 0; i < size; ++i) {
+      cmpBuf.put(buf.get());
+      if (!cmpBuf.hasRemaining()) {
+        cmpBuf.rewind();
+        long receivedData = cmpBuf.getLong();
+        cmpBuf.rewind();
+        long expected = hasher.generate(hashOffset);
+        hashOffset += BYTES_PER_LONG;
+        if (receivedData == expected) {
+          ++chunksSame;
+        } else {
+          ++chunksDifferent;
+        }
+      }
+    }
+    // any leftover partial chunk?
+    if (cmpBuf.hasRemaining() && cmpBuf.position() != 0) {
+      // partial capture
+      // zero fill and compare against the zero filled expected value
+      int curSize = cmpBuf.position();
+      while (cmpBuf.hasRemaining()) {
+        cmpBuf.put((byte) 0);
+      }
+      long expected = hasher.generate(hashOffset);
+      ByteBuffer tempBuf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
+      tempBuf.putLong(expected);
+      tempBuf.position(curSize);
+      while (tempBuf.hasRemaining()) {
+        tempBuf.put((byte) 0);
+      }
+      cmpBuf.rewind();
+      tempBuf.rewind();
+      if (cmpBuf.equals(tempBuf)) {
+        ++chunksSame;
+      } else {
+        ++chunksDifferent;
+      }
+    }
+    return new VerifyInfo(chunksSame, chunksDifferent);
+  }
+
+  /**
+   * Determines the offset to use given a byte counter
+   * 
+   * @param byteRead
+   * 
+   * @return offset position
+   */
+  private long determineOffset(long byteRead) {
+    if (byteRead < 0) {
+      byteRead = 0;
+    }
+    return (byteRead / BYTES_PER_LONG) * BYTES_PER_LONG;
+  }
+
+  /**
+   * Verifies a given number of bytes from a file - fewer bytes may be read if
+   * a header can not be read in due to the byte limit
+   * 
+   * @param byteAm
+   *          the byte amount to limit to (should be less than or equal to file
+   *          size)
+   * 
+   * @param in
+   *          the input stream to read from
+   * 
+   * @return VerifyOutput with data about reads
+   * 
+   * @throws IOException
+   *           if a read failure occurs
+   * 
+   * @throws BadFileException
+   *           if a header can not be read or end of file is reached
+   *           unexpectedly
+   */
+  VerifyOutput verifyFile(long byteAm, DataInputStream in)
+      throws IOException, BadFileException {
+    return verifyBytes(byteAm, 0, in);
+  }
+
+  /**
+   * Verifies a given number of bytes from a file - fewer bytes may be read if
+   * a header can not be read in due to the byte limit
+   * 
+   * @param byteAm
+   *          the byte amount to limit to (should be less than or equal to file
+   *          size)
+   * 
+   * @param bytesRead
+   *          the starting byte location
+   * 
+   * @param in
+   *          the input stream to read from
+   * 
+   * @return VerifyOutput with data about reads
+   * 
+   * @throws IOException
+   *           if a read failure occurs
+   * 
+   * @throws BadFileException
+   *           if a header can not be read or end of file is reached
+   *           unexpectedly
+   */
+  private VerifyOutput verifyBytes(long byteAm, long bytesRead,
+      DataInputStream in) throws IOException, BadFileException {
+    if (byteAm <= 0) {
+      return new VerifyOutput(0, 0, 0, 0);
+    }
+    long chunksSame = 0;
+    long chunksDifferent = 0;
+    long readTime = 0;
+    long bytesLeft = byteAm;
+    long bufLeft = 0;
+    long bufRead = 0;
+    long seqNum = 0;
+    DataHasher hasher = null;
+    ByteBuffer readBuf = ByteBuffer.wrap(new byte[bufferSize]);
+    while (bytesLeft > 0) {
+      if (bufLeft <= 0) {
+        if (bytesLeft < DataWriter.getHeaderLength()) {
+          // no bytes left to read a header
+          break;
+        }
+        // time to read a new header
+        ReadInfo header = null;
+        try {
+          header = readHeader(in);
+        } catch (EOFException e) {
+          // eof ok on header reads
+          // but not on data reads
+          break;
+        }
+        ++seqNum;
+        hasher = new DataHasher(header.getHashValue());
+        bufLeft = header.getByteAm();
+        readTime += header.getTimeTaken();
+        bytesRead += header.getBytesRead();
+        bytesLeft -= header.getBytesRead();
+        bufRead = 0;
+        // cap the segment size at the number of bytes we still want to read
+        if (bufLeft > bytesLeft) {
+          bufLeft = bytesLeft;
+        }
+        // does the segment have any data to read?
+        if (bufLeft <= 0) {
+          continue;
+        }
+      }
+      // figure out the buffer size to read
+      int bufSize = bufferSize;
+      if (bytesLeft < bufSize) {
+        bufSize = (int) bytesLeft;
+      }
+      if (bufLeft < bufSize) {
+        bufSize = (int) bufLeft;
+      }
+      // read it in
+      try {
+        readBuf.rewind();
+        long startTime = Timer.now();
+        in.readFully(readBuf.array(), 0, bufSize);
+        readTime += Timer.elapsed(startTime);
+      } catch (EOFException e) {
+        throw new BadFileException(
+            "Could not read the number of expected data bytes " + bufSize
+                + " due to unexpected end of file during sequence " + seqNum, e);
+      }
+      // update the counters
+      bytesRead += bufSize;
+      bytesLeft -= bufSize;
+      bufLeft -= bufSize;
+      // verify what we read
+      readBuf.rewind();
+      // figure out the expected hash offset start point
+      long vOffset = determineOffset(bufRead);
+      // now update for new position
+      bufRead += bufSize;
+      // verify
+      VerifyInfo verifyRes = verifyBuffer(readBuf, bufSize, vOffset, hasher);
+      // update the verification counters
+      chunksSame += verifyRes.getSame();
+      chunksDifferent += verifyRes.getDifferent();
+    }
+    return new VerifyOutput(chunksSame, chunksDifferent, bytesRead, readTime);
+  }
+
+
+  /**
+   * Reads a header from the given input stream
+   * 
+   * @param in
+   *          input stream to read from
+   * 
+   * @return ReadInfo
+   * 
+   * @throws IOException
+   *           if a read error or unexpected end of file occurs
+   * 
+   * @throws BadFileException
+   *           if the byte amount read from the header is invalid (negative)
+   */
+  ReadInfo readHeader(DataInputStream in) throws IOException,
+      BadFileException {
+    int headerLen = DataWriter.getHeaderLength();
+    ByteBuffer headerBuf = ByteBuffer.wrap(new byte[headerLen]);
+    long elapsed = 0;
+    {
+      long startTime = Timer.now();
+      in.readFully(headerBuf.array());
+      elapsed += Timer.elapsed(startTime);
+    }
+    headerBuf.rewind();
+    long hashValue = headerBuf.getLong();
+    long byteAvailable = headerBuf.getLong();
+    if (byteAvailable < 0) {
+      throw new BadFileException("Invalid negative amount " + byteAvailable
+          + " determined for header data amount");
+    }
+    return new ReadInfo(byteAvailable, hashValue, elapsed, headerLen);
+  }
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataVerifier.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
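
The verifier above expects each file to be a sequence of segments: an 8-byte hash seed and an 8-byte data length (consumed by readHeader), followed by that many bytes generated one long at a time from the seed and the byte offset, with any trailing partial long compared against the zero-padded leading bytes of the expected value. A minimal standalone sketch of that layout; the generate() function below is only a stand-in, since DataHasher is not shown in this part of the commit:

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  public class SegmentLayoutSketch {
    // stand-in for DataHasher.generate(offset); the real function is not shown here
    static long generate(long seed, long offset) {
      return seed ^ (offset * 0x9E3779B97F4A7C15L);
    }

    // one segment: [8 byte hash seed][8 byte data length][data derived from seed + offset]
    static void writeSegment(DataOutputStream out, long seed, long dataLen)
        throws IOException {
      out.writeLong(seed);    // consumed by readHeader()
      out.writeLong(dataLen); // consumed by readHeader()
      for (long offset = 0; offset < dataLen; offset += 8) {
        long value = generate(seed, offset);
        // a full 8 byte chunk, or only the leading bytes of the last partial chunk
        for (int b = 0; b < 8 && offset + b < dataLen; ++b) {
          out.write((int) (value >>> (56 - 8 * b)));
        }
      }
    }

    public static void main(String[] args) throws IOException {
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      writeSegment(new DataOutputStream(bytes), 42L, 20L);
      System.out.println(bytes.size()); // 36: a 16 byte header plus 20 data bytes
    }
  }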

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java Fri May 21 00:02:12 2010
@@ -0,0 +1,375 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Random;
+
+import static org.apache.hadoop.fs.slive.Constants.BYTES_PER_LONG;
+
+/**
+ * Class which handles generating data (creating and appending) along with
+ * ensuring the correct data is written out for the given path name so that it
+ * can be later verified
+ */
+class DataWriter {
+  /**
+   * Header size in bytes
+   */
+  private static final int HEADER_LENGTH = (BYTES_PER_LONG * 2);
+
+  private int bufferSize;
+  private Random rnd;
+
+  /**
+   * Class used to hold the number of bytes written and the time taken by a
+   * write operation, for callers to use
+   */
+  static class GenerateOutput {
+
+    private long bytes;
+    private long time;
+
+    GenerateOutput(long bytesWritten, long timeTaken) {
+      this.bytes = bytesWritten;
+      this.time = timeTaken;
+    }
+
+    long getBytesWritten() {
+      return bytes;
+    }
+
+    long getTimeTaken() {
+      return time;
+    }
+
+    public String toString() {
+      return "Wrote " + getBytesWritten() + " bytes " + " which took "
+          + getTimeTaken() + " milliseconds";
+    }
+  }
+
+  /**
+   * Class used to hold a byte buffer and offset position for generating data
+   */
+  private static class GenerateResult {
+    private long offset;
+    private ByteBuffer buffer;
+
+    GenerateResult(long offset, ByteBuffer buffer) {
+      this.offset = offset;
+      this.buffer = buffer;
+    }
+
+    long getOffset() {
+      return offset;
+    }
+
+    ByteBuffer getBuffer() {
+      return buffer;
+    }
+  }
+
+  /**
+   * Holds what a header write returns: the hash value that was used, the time
+   * taken to perform the write and the number of bytes written
+   */
+  private static class WriteInfo {
+    private long hashValue;
+    private long bytesWritten;
+    private long timeTaken;
+
+    WriteInfo(long hashValue, long bytesWritten, long timeTaken) {
+      this.hashValue = hashValue;
+      this.bytesWritten = bytesWritten;
+      this.timeTaken = timeTaken;
+    }
+
+    long getHashValue() {
+      return hashValue;
+    }
+
+    long getTimeTaken() {
+      return timeTaken;
+    }
+
+    long getBytesWritten() {
+      return bytesWritten;
+    }
+  }
+
+  /**
+   * Inits with given buffer size (must be at least bytes per long and a
+   * multiple of bytes per long)
+   * 
+   * @param rnd
+   *          random number generator to use for hash value creation
+   * 
+   * @param bufferSize
+   *          size which must be at least BYTES_PER_LONG and which also must
+   *          be a multiple of BYTES_PER_LONG
+   */
+  DataWriter(Random rnd, int bufferSize) {
+    if (bufferSize < BYTES_PER_LONG) {
+      throw new IllegalArgumentException(
+          "Buffer size must be greater than or equal to " + BYTES_PER_LONG);
+    }
+    if ((bufferSize % BYTES_PER_LONG) != 0) {
+      throw new IllegalArgumentException("Buffer size must be a multiple of "
+          + BYTES_PER_LONG);
+    }
+    this.bufferSize = bufferSize;
+    this.rnd = rnd;
+  }
+
+  /**
+   * Inits with default buffer size
+   */
+  DataWriter(Random rnd) {
+    this(rnd, Constants.BUFFERSIZE);
+  }
+
+  /**
+   * Generates a partial segment whose size is less than bytes per long
+   * 
+   * @param byteAm
+   *          the number of bytes to generate (less than bytes per long)
+   * @param offset
+   *          the starting offset
+   * @param hasher
+   *          hasher to use for generating data given an offset
+   * 
+   * @return GenerateResult containing new offset and byte buffer
+   */
+  private GenerateResult generatePartialSegment(int byteAm, long offset,
+      DataHasher hasher) {
+    if (byteAm > BYTES_PER_LONG) {
+      throw new IllegalArgumentException(
+          "Partial bytes must be less or equal to " + BYTES_PER_LONG);
+    }
+    if (byteAm <= 0) {
+      throw new IllegalArgumentException(
+          "Partial bytes must be greater than zero and not " + byteAm);
+    }
+    ByteBuffer buf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
+    buf.putLong(hasher.generate(offset));
+    ByteBuffer allBytes = ByteBuffer.wrap(new byte[byteAm]);
+    buf.rewind();
+    for (int i = 0; i < byteAm; ++i) {
+      allBytes.put(buf.get());
+    }
+    allBytes.rewind();
+    return new GenerateResult(offset, allBytes);
+  }
+
+  /**
+   * Generates a full segment (aligned to bytes per long) of the given byte
+   * amount size
+   * 
+   * @param byteAm
+   *          long aligned size
+   * @param startOffset
+   *          starting hash offset
+   * @param hasher
+   *          hasher to use for generating data given an offset
+   * @return GenerateResult containing new offset and byte buffer
+   */
+  private GenerateResult generateFullSegment(int byteAm, long startOffset,
+      DataHasher hasher) {
+    if (byteAm <= 0) {
+      throw new IllegalArgumentException(
+          "Byte amount must be greater than zero and not " + byteAm);
+    }
+    if ((byteAm % BYTES_PER_LONG) != 0) {
+      throw new IllegalArgumentException("Byte amount " + byteAm
+          + " must be a multiple of " + BYTES_PER_LONG);
+    }
+    // generate all the segments
+    ByteBuffer allBytes = ByteBuffer.wrap(new byte[byteAm]);
+    long offset = startOffset;
+    ByteBuffer buf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
+    for (long i = 0; i < byteAm; i += BYTES_PER_LONG) {
+      buf.rewind();
+      buf.putLong(hasher.generate(offset));
+      buf.rewind();
+      allBytes.put(buf);
+      offset += BYTES_PER_LONG;
+    }
+    allBytes.rewind();
+    return new GenerateResult(offset, allBytes);
+  }
+
+  /**
+   * Writes a set of bytes to the output stream. For full segments the complete
+   * segment is written out; for partial segments, i.e. when the last position
+   * does not fill up a full long, only the needed leading bytes of the expected
+   * full segment are written out
+   * 
+   * @param byteAm
+   *          the amount of bytes to write
+   * @param startPos
+   *          a BYTES_PER_LONG aligned start position
+   * @param hasher
+   *          hasher to use for generating data given an offset
+   * @param out
+   *          the output stream to write to
+   * @return GenerateOutput with how many bytes were written and the time taken
+   * @throws IOException
+   */
+  private GenerateOutput writePieces(long byteAm, long startPos,
+      DataHasher hasher, OutputStream out) throws IOException {
+    if (byteAm <= 0) {
+      return new GenerateOutput(0, 0);
+    }
+    if (startPos < 0) {
+      startPos = 0;
+    }
+    int leftOver = (int) (byteAm % bufferSize);
+    long fullPieces = byteAm / bufferSize;
+    long offset = startPos;
+    long bytesWritten = 0;
+    long timeTaken = 0;
+    // write the full pieces that fit in the buffer size
+    for (long i = 0; i < fullPieces; ++i) {
+      GenerateResult genData = generateFullSegment(bufferSize, offset, hasher);
+      offset = genData.getOffset();
+      ByteBuffer gBuf = genData.getBuffer();
+      {
+        byte[] buf = gBuf.array();
+        long startTime = Timer.now();
+        out.write(buf);
+        if (Constants.FLUSH_WRITES) {
+          out.flush();
+        }
+        timeTaken += Timer.elapsed(startTime);
+        bytesWritten += buf.length;
+      }
+    }
+    if (leftOver > 0) {
+      ByteBuffer leftOverBuf = ByteBuffer.wrap(new byte[leftOver]);
+      int bytesLeft = leftOver % BYTES_PER_LONG;
+      leftOver = leftOver - bytesLeft;
+      // collect the piece which does not fit in the buffer size but is
+      // still at least BYTES_PER_LONG and a multiple of it
+      if (leftOver > 0) {
+        GenerateResult genData = generateFullSegment(leftOver, offset, hasher);
+        offset = genData.getOffset();
+        leftOverBuf.put(genData.getBuffer());
+      }
+      // collect any single partial byte segment
+      if (bytesLeft > 0) {
+        GenerateResult genData = generatePartialSegment(bytesLeft, offset,
+            hasher);
+        offset = genData.getOffset();
+        leftOverBuf.put(genData.getBuffer());
+      }
+      // do the write of both
+      leftOverBuf.rewind();
+      {
+        byte[] buf = leftOverBuf.array();
+        long startTime = Timer.now();
+        out.write(buf);
+        if (Constants.FLUSH_WRITES) {
+          out.flush();
+        }
+        timeTaken += Timer.elapsed(startTime);
+        bytesWritten += buf.length;
+      }
+    }
+    return new GenerateOutput(bytesWritten, timeTaken);
+  }
+
+  /**
+   * Writes the given number of bytes to a stream
+   * 
+   * @param byteAm
+   *          the file size in number of bytes to write
+   * 
+   * @param out
+   *          the output stream to write to
+   * 
+   * @return GenerateOutput with the number of bytes written and the time taken
+   * 
+   * @throws IOException
+   */
+  GenerateOutput writeSegment(long byteAm, OutputStream out)
+      throws IOException {
+    long headerLen = getHeaderLength();
+    if (byteAm < headerLen) {
+      // not enough bytes to write even the header
+      return new GenerateOutput(0, 0);
+    }
+    // adjust for header length
+    byteAm -= headerLen;
+    if (byteAm < 0) {
+      byteAm = 0;
+    }
+    WriteInfo header = writeHeader(out, byteAm);
+    DataHasher hasher = new DataHasher(header.getHashValue());
+    GenerateOutput pRes = writePieces(byteAm, 0, hasher, out);
+    long bytesWritten = pRes.getBytesWritten() + header.getBytesWritten();
+    long timeTaken = header.getTimeTaken() + pRes.getTimeTaken();
+    return new GenerateOutput(bytesWritten, timeTaken);
+  }
+
+  /**
+   * Gets the header length
+   * 
+   * @return int
+   */
+  static int getHeaderLength() {
+    return HEADER_LENGTH;
+  }
+
+  /**
+   * Writes a header to the given output stream
+   * 
+   * @param os
+   *          output stream to write to
+   * 
+   * @param fileSize
+   *          the file size to write
+   * 
+   * @return WriteInfo
+   * 
+   * @throws IOException
+   *           if a write failure occurs
+   */
+  WriteInfo writeHeader(OutputStream os, long fileSize) throws IOException {
+    int headerLen = getHeaderLength();
+    ByteBuffer buf = ByteBuffer.wrap(new byte[headerLen]);
+    long hash = rnd.nextLong();
+    buf.putLong(hash);
+    buf.putLong(fileSize);
+    buf.rewind();
+    byte[] headerData = buf.array();
+    long elapsed = 0;
+    {
+      long startTime = Timer.now();
+      os.write(headerData);
+      if (Constants.FLUSH_WRITES) {
+        os.flush();
+      }
+      elapsed += Timer.elapsed(startTime);
+    }
+    return new WriteInfo(hash, headerLen, elapsed);
+  }
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DataWriter.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
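
The two classes are meant to round-trip: what DataWriter.writeSegment produces, DataVerifier.verifyFile should report back with zero differing chunks. A minimal sketch, assuming it lives in the same package and that the rest of the slive classes from this commit (Constants, DataHasher, Timer, BadFileException) are on the classpath:

  package org.apache.hadoop.fs.slive;

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.util.Random;

  public class WriteVerifySketch {
    public static void main(String[] args) throws Exception {
      // write a synthetic one megabyte "file" into memory
      ByteArrayOutputStream sink = new ByteArrayOutputStream();
      DataWriter writer = new DataWriter(new Random(0L));
      DataWriter.GenerateOutput written = writer.writeSegment(1024 * 1024, sink);
      // read it back and verify the header and every data chunk
      DataInputStream source =
          new DataInputStream(new ByteArrayInputStream(sink.toByteArray()));
      DataVerifier.VerifyOutput verified =
          new DataVerifier().verifyFile(written.getBytesWritten(), source);
      System.out.println(written);
      System.out.println(verified); // expect zero "different" chunks
    }
  }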

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java Fri May 21 00:02:12 2010
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+
+/**
+ * Operation which selects a random file and attempts to delete that file (if it
+ * exists)
+ * 
+ * On success this operation will capture statistics on the time taken to delete
+ * and the number of successful deletions that occurred; on failure or error it
+ * will capture the number of failures and the amount of time taken to fail
+ */
+class DeleteOp extends Operation {
+
+  private static final Log LOG = LogFactory.getLog(DeleteOp.class);
+
+  DeleteOp(ConfigExtractor cfg, Random rnd) {
+    super(DeleteOp.class.getSimpleName(), cfg, rnd);
+  }
+
+  /**
+   * Gets the file to delete
+   */
+  protected Path getDeleteFile() {
+    Path fn = getFinder().getFile();
+    return fn;
+  }
+
+  @Override // Operation
+  List<OperationOutput> run(FileSystem fs) {
+    List<OperationOutput> out = super.run(fs);
+    try {
+      Path fn = getDeleteFile();
+      long timeTaken = 0;
+      boolean deleteStatus = false;
+      {
+        long startTime = Timer.now();
+        deleteStatus = fs.delete(fn, false);
+        timeTaken = Timer.elapsed(startTime);
+      }
+      // collect the stats
+      if (!deleteStatus) {
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.FAILURES, 1L));
+        LOG.info("Could not delete " + fn);
+      } else {
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.OK_TIME_TAKEN, timeTaken));
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.SUCCESSES, 1L));
+        LOG.info("Could delete " + fn);
+      }
+    } catch (FileNotFoundException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.NOT_FOUND, 1L));
+      LOG.warn("Error with deleting", e);
+    } catch (IOException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.FAILURES, 1L));
+      LOG.warn("Error with deleting", e);
+    }
+    return out;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DeleteOp.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java Fri May 21 00:02:12 2010
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+
+/**
+ * An input format which returns one dummy key and value per split
+ */
+@SuppressWarnings("deprecation")
+class DummyInputFormat implements InputFormat<Object, Object> {
+
+  static class EmptySplit implements InputSplit {
+    public void write(DataOutput out) throws IOException {
+    }
+
+    public void readFields(DataInput in) throws IOException {
+    }
+
+    public long getLength() {
+      return 0L;
+    }
+
+    public String[] getLocations() {
+      return new String[0];
+    }
+  }
+
+  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
+    InputSplit[] splits = new InputSplit[numSplits];
+    for (int i = 0; i < splits.length; ++i) {
+      splits[i] = new EmptySplit();
+    }
+    return splits;
+  }
+
+  public RecordReader<Object, Object> getRecordReader(InputSplit split,
+      JobConf job, Reporter reporter) throws IOException {
+    return new RecordReader<Object, Object>() {
+
+      boolean once = false;
+
+      public boolean next(Object key, Object value) throws IOException {
+        if (!once) {
+          once = true;
+          return true;
+        }
+        return false;
+      }
+
+      public Object createKey() {
+        return new Object();
+      }
+
+      public Object createValue() {
+        return new Object();
+      }
+
+      public long getPos() throws IOException {
+        return 0L;
+      }
+
+      public void close() throws IOException {
+      }
+
+      public float getProgress() throws IOException {
+        return 0.0f;
+      }
+    };
+  }
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/DummyInputFormat.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
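
A sketch of how an input format like this is typically wired into a job with the old mapred API (the job setup itself is not part of this patch): the configured number of map tasks controls how many empty splits, and therefore how many mappers, get created:

  package org.apache.hadoop.fs.slive;

  import org.apache.hadoop.mapred.JobConf;

  public class DummyInputJobSketch {
    static JobConf configure() {
      JobConf job = new JobConf();
      job.setJobName("slive");
      // each map task receives a single EmptySplit with one dummy key/value record
      job.setInputFormat(DummyInputFormat.class);
      job.setNumMapTasks(10); // getSplits(job, 10) will return 10 EmptySplit instances
      return job;
    }
  }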

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java Fri May 21 00:02:12 2010
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.text.DecimalFormat;
+import java.text.NumberFormat;
+
+/**
+ * Simple class that holds the number formatters used in the slive application
+ */
+class Formatter {
+
+  private static final String NUMBER_FORMAT = "###.###";
+
+  private static NumberFormat decFormatter = null;
+
+  private static NumberFormat percFormatter = null;
+
+  /**
+   * No construction allowed - only simple static accessor functions
+   */
+  private Formatter() {
+
+  }
+
+  /**
+   * Gets a decimal formatter with 3 decimal places of precision
+   * 
+   * @return NumberFormat formatter
+   */
+  static synchronized NumberFormat getDecimalFormatter() {
+    if (decFormatter == null) {
+      decFormatter = new DecimalFormat(NUMBER_FORMAT);
+    }
+    return decFormatter;
+  }
+
+  /**
+   * Gets a percent formatter with 3 decimal places of precision
+   * 
+   * @return NumberFormat formatter
+   */
+  static synchronized NumberFormat getPercentFormatter() {
+    if (percFormatter == null) {
+      percFormatter = NumberFormat.getPercentInstance();
+      percFormatter.setMaximumFractionDigits(3);
+    }
+    return percFormatter;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Formatter.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
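
A tiny sketch of the resulting output, assuming an English locale:

  package org.apache.hadoop.fs.slive;

  public class FormatterSketch {
    public static void main(String[] args) {
      System.out.println(Formatter.getDecimalFormatter().format(12.34567));  // 12.346
      System.out.println(Formatter.getPercentFormatter().format(0.1234567)); // 12.346%
    }
  }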

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java Fri May 21 00:02:12 2010
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+/**
+ * Simple slive helper methods (may not exist in 0.20)
+ */
+class Helper {
+
+  private Helper() {
+
+  }
+
+  private static final String[] emptyStringArray = {};
+
+  /**
+   * Splits a string on commas and trims each resulting element
+   * 
+   * @param str
+   * @return array of trimmed strings (empty if the input is null or blank)
+   */
+  static String[] getTrimmedStrings(String str) {
+    if (null == str || "".equals(str.trim())) {
+      return emptyStringArray;
+    }
+    return str.trim().split("\\s*,\\s*");
+  }
+
+  /**
+   * Converts a byte value into a useful string for output
+   * 
+   * @param bytes
+   * 
+   * @return String
+   */
+  static String toByteInfo(long bytes) {
+    StringBuilder str = new StringBuilder();
+    if (bytes < 0) {
+      bytes = 0;
+    }
+    str.append(bytes);
+    str.append(" bytes or ");
+    str.append(bytes / 1024);
+    str.append(" kilobytes or ");
+    str.append(bytes / (1024 * 1024));
+    str.append(" megabytes or ");
+    str.append(bytes / (1024 * 1024 * 1024));
+    str.append(" gigabytes");
+    return str.toString();
+  }
+
+  /**
+   * Stringifies an array using the given separator.
+   * 
+   * @param args
+   *          the array to format
+   * @param sep
+   *          the separator string to use (i.e. comma or space)
+   * 
+   * @return String representing that array
+   */
+  static String stringifyArray(Object[] args, String sep) {
+    StringBuilder optStr = new StringBuilder();
+    for (int i = 0; i < args.length; ++i) {
+      optStr.append(args[i]);
+      if ((i + 1) != args.length) {
+        optStr.append(sep);
+      }
+    }
+    return optStr.toString();
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Helper.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
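
A quick sketch of what these helpers produce (placed in the same package, since Helper is package-private):

  package org.apache.hadoop.fs.slive;

  import java.util.Arrays;

  public class HelperSketch {
    public static void main(String[] args) {
      // [a, b, c] - whitespace around each element is trimmed away
      System.out.println(Arrays.toString(Helper.getTrimmedStrings(" a , b ,c ")));
      // 2048 bytes or 2 kilobytes or 0 megabytes or 0 gigabytes
      System.out.println(Helper.toByteInfo(2048));
      // a,b,c
      System.out.println(Helper.stringifyArray(new Object[] { "a", "b", "c" }, ","));
    }
  }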

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java Fri May 21 00:02:12 2010
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+
+/**
+ * Operation which selects a random directory and attempts to list that
+ * directory (if it exists)
+ * 
+ * On success this operation will capture statistics on the time taken to list
+ * that directory, the number of successful listings that occurred and the
+ * number of entries in the selected directory; on failure or error it will
+ * capture the number of failures and the amount of time taken to fail
+ */
+class ListOp extends Operation {
+
+  private static final Log LOG = LogFactory.getLog(ListOp.class);
+
+  ListOp(ConfigExtractor cfg, Random rnd) {
+    super(ListOp.class.getSimpleName(), cfg, rnd);
+  }
+
+  /**
+   * Gets the directory to list
+   * 
+   * @return Path
+   */
+  protected Path getDirectory() {
+    Path dir = getFinder().getDirectory();
+    return dir;
+  }
+
+  @Override // Operation
+  List<OperationOutput> run(FileSystem fs) {
+    List<OperationOutput> out = super.run(fs);
+    try {
+      Path dir = getDirectory();
+      long dirEntries = 0;
+      long timeTaken = 0;
+      {
+        long startTime = Timer.now();
+        FileStatus[] files = fs.listStatus(dir);
+        timeTaken = Timer.elapsed(startTime);
+        dirEntries = files.length;
+      }
+      // log stats
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.OK_TIME_TAKEN, timeTaken));
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.SUCCESSES, 1L));
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.DIR_ENTRIES, dirEntries));
+      LOG.info("Directory " + dir + " has " + dirEntries + " entries");
+    } catch (FileNotFoundException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.NOT_FOUND, 1L));
+      LOG.warn("Error with listing", e);
+    } catch (IOException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.FAILURES, 1L));
+      LOG.warn("Error with listing", e);
+    }
+    return out;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ListOp.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java Fri May 21 00:02:12 2010
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+
+/**
+ * Operation which selects a random directory and attempts to create that
+ * directory.
+ * 
+ * On success this operation will capture statistics on the time taken to
+ * create that directory and the number of successful creations that occurred;
+ * on failure or error it will capture the number of failures and the amount of
+ * time taken to fail
+ */
+class MkdirOp extends Operation {
+
+  private static final Log LOG = LogFactory.getLog(MkdirOp.class);
+
+  MkdirOp(ConfigExtractor cfg, Random rnd) {
+    super(MkdirOp.class.getSimpleName(), cfg, rnd);
+  }
+  
+  /**
+   * Gets the directory name to try to make
+   * 
+   * @return Path
+   */
+  protected Path getDirectory() {
+    Path dir = getFinder().getDirectory();
+    return dir;
+  }
+
+  @Override // Operation
+  List<OperationOutput> run(FileSystem fs) {
+    List<OperationOutput> out = super.run(fs);
+    try {
+      Path dir = getDirectory();
+      boolean mkRes = false;
+      long timeTaken = 0;
+      {
+        long startTime = Timer.now();
+        mkRes = fs.mkdirs(dir);
+        timeTaken = Timer.elapsed(startTime);
+      }
+      // log stats
+      if (mkRes) {
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.OK_TIME_TAKEN, timeTaken));
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.SUCCESSES, 1L));
+        LOG.info("Made directory " + dir);
+      } else {
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.FAILURES, 1L));
+        LOG.warn("Could not make " + dir);
+      }
+    } catch (FileNotFoundException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.NOT_FOUND, 1L));
+      LOG.warn("Error with mkdir", e);
+    } catch (IOException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.FAILURES, 1L));
+      LOG.warn("Error with mkdir", e);
+    }
+    return out;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/MkdirOp.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java Fri May 21 00:02:12 2010
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.util.List;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Operation which wraps a given operation and allows an observer to be notified
+ * when the operation is about to start and when the operation has finished
+ */
+class ObserveableOp extends Operation {
+
+  /**
+   * The observation interface which classes that wish to monitor starting and
+   * ending events must implement.
+   */
+  interface Observer {
+    void notifyStarting(Operation op);
+    void notifyFinished(Operation op);
+  }
+
+  private Operation op;
+  private Observer observer;
+
+  ObserveableOp(Operation op, Observer observer) {
+    super(op.getType(), op.getConfig(), op.getRandom());
+    this.op = op;
+    this.observer = observer;
+  }
+
+  /**
+   * Proxy to underlying operation toString()
+   */
+  public String toString() {
+    return op.toString();
+  }
+
+  @Override // Operation
+  List<OperationOutput> run(FileSystem fs) {
+    List<OperationOutput> result = null;
+    try {
+      if (observer != null) {
+        observer.notifyStarting(op);
+      }
+      result = op.run(fs);
+    } finally {
+      if (observer != null) {
+        observer.notifyFinished(op);
+      }
+    }
+    return result;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ObserveableOp.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
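
A minimal sketch of an observer that times any wrapped operation from the outside; the Timer class and the wrapped operation are assumed to come from elsewhere in this commit:

  package org.apache.hadoop.fs.slive;

  import java.util.List;

  import org.apache.hadoop.fs.FileSystem;

  public class TimingObserverSketch implements ObserveableOp.Observer {
    private long startTime;

    public void notifyStarting(Operation op) {
      startTime = Timer.now();
    }

    public void notifyFinished(Operation op) {
      System.out.println(op.getType() + " took " + Timer.elapsed(startTime) + " ms");
    }

    // wraps any operation so this observer sees its start and finish events
    static List<OperationOutput> runObserved(Operation op, FileSystem fs) {
      return new ObserveableOp(op, new TimingObserverSketch()).run(fs);
    }
  }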

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java Fri May 21 00:02:12 2010
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+
+/**
+ * Base class for operations; it provides access to the configuration, path
+ * finder, type name and random number generator, and a concrete operation must
+ * override the run() function to provide its varying output to be captured.
+ */
+abstract class Operation {
+
+  private ConfigExtractor config;
+  private PathFinder finder;
+  private String type;
+  private Random rnd;
+
+  protected Operation(String type, ConfigExtractor cfg, Random rnd) {
+    this.config = cfg;
+    this.type = type;
+    this.rnd = rnd;
+    this.finder = new PathFinder(cfg, rnd);
+  }
+
+  /**
+   * Gets the configuration object this class is using
+   * 
+   * @return ConfigExtractor
+   */
+  protected ConfigExtractor getConfig() {
+    return this.config;
+  }
+
+  /**
+   * Gets the random number generator to use for this operation
+   * 
+   * @return Random
+   */
+  protected Random getRandom() {
+    return this.rnd;
+  }
+
+  /**
+   * Gets the type of operation that this class belongs to
+   * 
+   * @return String
+   */
+  String getType() {
+    return type;
+  }
+
+  /**
+   * Gets the path finding/generating instance that this class is using
+   * 
+   * @return PathFinder
+   */
+  protected PathFinder getFinder() {
+    return this.finder;
+  }
+
+  /*
+   * (non-Javadoc)
+   * 
+   * @see java.lang.Object#toString()
+   */
+  public String toString() {
+    return getType();
+  }
+
+  /**
+   * This run() method simply sets up the default output container and adds an
+   * entry to it that keeps track of the number of operations that occurred
+   * 
+   * @param fs
+   *          FileSystem object to perform operations with
+   * 
+   * @return List of operation outputs to be collected and output in the overall
+   *         map reduce operation (or empty or null if none)
+   */
+  List<OperationOutput> run(FileSystem fs) {
+    List<OperationOutput> out = new LinkedList<OperationOutput>();
+    out.add(new OperationOutput(OutputType.LONG, getType(),
+        ReportWriter.OP_COUNT, 1L));
+    return out;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Operation.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
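
As a sketch of the expected subclass shape, a concrete operation calls super.run(fs) to get the default output (which records the operation count) and then appends its own measurements; NoopOp below is hypothetical, and ConfigExtractor, ReportWriter and Timer come from elsewhere in this commit:

  package org.apache.hadoop.fs.slive;

  import java.util.List;
  import java.util.Random;

  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.slive.OperationOutput.OutputType;

  class NoopOp extends Operation {

    NoopOp(ConfigExtractor cfg, Random rnd) {
      super(NoopOp.class.getSimpleName(), cfg, rnd);
    }

    @Override // Operation
    List<OperationOutput> run(FileSystem fs) {
      List<OperationOutput> out = super.run(fs); // records the OP_COUNT entry
      long startTime = Timer.now();
      // ... the real work against fs would go here ...
      out.add(new OperationOutput(OutputType.LONG, getType(),
          ReportWriter.OK_TIME_TAKEN, Timer.elapsed(startTime)));
      out.add(new OperationOutput(OutputType.LONG, getType(),
          ReportWriter.SUCCESSES, 1L));
      return out;
    }
  }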

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java Fri May 21 00:02:12 2010
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import org.apache.hadoop.fs.slive.Constants.Distribution;
+
+/**
+ * This class holds the data representing an operation's distribution and its
+ * percentage (between 0 and 1) and provides methods to access those values and
+ * to parse them from and format them back into strings
+ */
+class OperationData {
+
+  private static final String SEP = ",";
+
+  private Distribution distribution;
+  private Double percent;
+
+  OperationData(Distribution d, Double p) {
+    this.distribution = d;
+    this.percent = p;
+  }
+
+  /**
+   * Expects a comma separated list where the first element is the ratio
+   * (between 0 and 100) and the second element is the distribution (if it is
+   * absent then uniform will be selected). If an empty list is passed in then
+   * only the distribution will be set (to uniform) and the percent will be
+   * left as null.
+   */
+  OperationData(String data) {
+    String pieces[] = Helper.getTrimmedStrings(data);
+    distribution = Distribution.UNIFORM;
+    percent = null;
+    if (pieces.length == 1) {
+      percent = (Double.parseDouble(pieces[0]) / 100.0d);
+    } else if (pieces.length >= 2) {
+      percent = (Double.parseDouble(pieces[0]) / 100.0d);
+      distribution = Distribution.valueOf(pieces[1].toUpperCase());
+    }
+  }
+
+  /**
+   * Gets the distribution this operation represents
+   * 
+   * @return Distribution
+   */
+  Distribution getDistribution() {
+    return distribution;
+  }
+
+  /**
+   * Gets the 0 to 1 fraction that this operation's run ratio should be
+   * 
+   * @return Double (or null if not given)
+   */
+  Double getPercent() {
+    return percent;
+  }
+
+  /**
+   * Returns a string representation of this object; if the percent is null
+   * then NaN will be output instead. Format is percent,distribution.
+   */
+  public String toString() {
+    StringBuilder str = new StringBuilder();
+    if (getPercent() != null) {
+      str.append(getPercent() * 100.0d);
+    } else {
+      str.append(Double.NaN);
+    }
+    str.append(SEP);
+    str.append(getDistribution().lowerName());
+    return str.toString();
+  }
+}
\ No newline at end of file

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationData.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
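
For example, following the parsing rules above, a config fragment like "20,uniform" becomes a 0.2 run ratio with the uniform distribution (a small same-package sketch):

  package org.apache.hadoop.fs.slive;

  public class OperationDataSketch {
    public static void main(String[] args) {
      OperationData full = new OperationData("20,uniform");
      System.out.println(full.getPercent());      // 0.2
      System.out.println(full.getDistribution()); // the uniform distribution

      OperationData bare = new OperationData("");
      System.out.println(bare.getPercent());      // null - no percent was given
      System.out.println(bare.getDistribution()); // uniform, the default
    }
  }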

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java Fri May 21 00:02:12 2010
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.hadoop.fs.slive.Constants.OperationType;
+
+/**
+ * Factory class which returns cached instances of operations given their
+ * operation type enumeration.
+ */
+class OperationFactory {
+
+  private Map<OperationType, Operation> typedOperations;
+  private ConfigExtractor config;
+  private Random rnd;
+
+  OperationFactory(ConfigExtractor cfg, Random rnd) {
+    this.typedOperations = new HashMap<OperationType, Operation>();
+    this.config = cfg;
+    this.rnd = rnd;
+  }
+
+  /**
+   * Gets an operation instance (cached) for a given operation type
+   * 
+   * @param type
+   *          the operation type to fetch for
+   * 
+   * @return Operation operation instance or null if it cannot be fetched.
+   */
+  Operation getOperation(OperationType type) {
+    Operation op = typedOperations.get(type);
+    if (op != null) {
+      return op;
+    }
+    switch (type) {
+    case READ:
+      op = new ReadOp(this.config, rnd);
+      break;
+    case LS:
+      op = new ListOp(this.config, rnd);
+      break;
+    case MKDIR:
+      op = new MkdirOp(this.config, rnd);
+      break;
+    case APPEND:
+      op = new AppendOp(this.config, rnd);
+      break;
+    case RENAME:
+      op = new RenameOp(this.config, rnd);
+      break;
+    case DELETE:
+      op = new DeleteOp(this.config, rnd);
+      break;
+    case CREATE:
+      op = new CreateOp(this.config, rnd);
+      break;
+    }
+    typedOperations.put(type, op);
+    return op;
+  }
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationFactory.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
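
    As a quick illustration of the per-type caching in getOperation above,
    a sketch (not part of the commit) that assumes a ConfigExtractor has
    already been constructed elsewhere and that the code sits in the
    org.apache.hadoop.fs.slive package:

    package org.apache.hadoop.fs.slive;

    import java.util.Random;

    import org.apache.hadoop.fs.slive.Constants.OperationType;

    // Illustrative sketch only; not part of the committed patch.
    class OperationFactoryCachingExample {
      static void demonstrate(ConfigExtractor cfg) {
        OperationFactory factory = new OperationFactory(cfg, new Random(1L));
        Operation first = factory.getOperation(OperationType.READ);
        Operation second = factory.getOperation(OperationType.READ);
        // The second lookup returns the cached ReadOp rather than a new one.
        System.out.println(first == second); // expected: true
      }
    }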

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java Fri May 21 00:02:12 2010
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import org.apache.hadoop.io.Text;
+
+/**
+ * An operation output has the following object format whereby simple types are
+ * represented as a key of dataType:operationType*measurementType and these
+ * simple types can be combined (mainly in the reducer) using their given types
+ * into a single operation output.
+ * 
+ * Combination is done based on the data types and the following convention is
+ * followed (in the following order). If one is a string then the other will be
+ * concatenated as a string with a ";" separator. If one is a double then the
+ * other will be added as a double and the output will be a double. If one is a
+ * float then the other will be added as a float and the output will be a float.
+ * Following this, if one is a long the other will be added as a long and the
+ * output type will be a long, and if one is an integer the other will be added
+ * as an integer and the output type will be an integer.
+ */
+class OperationOutput {
+
+  private OutputType dataType;
+  private String opType, measurementType;
+  private Object value;
+
+  private static final String TYPE_SEP = ":";
+  private static final String MEASUREMENT_SEP = "*";
+  private static final String STRING_SEP = ";";
+
+  static enum OutputType {
+    STRING, FLOAT, LONG, DOUBLE, INTEGER
+  }
+
+  /**
+   * Parses a given key according to the expected key format and extracts the
+   * corresponding segments.
+   * 
+   * @param key
+   *          the key in expected dataType:operationType*measurementType format
+   * @param value
+   *          a generic value expected to match the output type
+   * @throws IllegalArgumentException
+   *           if invalid format
+   */
+  OperationOutput(String key, Object value) {
+    int place = key.indexOf(TYPE_SEP);
+    if (place == -1) {
+      throw new IllegalArgumentException(
+          "Invalid key format - no type seperator - " + TYPE_SEP);
+    }
+    try {
+      dataType = OutputType.valueOf(key.substring(0, place).toUpperCase());
+    } catch (Exception e) {
+      throw new IllegalArgumentException(
+          "Invalid key format - invalid output type", e);
+    }
+    key = key.substring(place + 1);
+    place = key.indexOf(MEASUREMENT_SEP);
+    if (place == -1) {
+      throw new IllegalArgumentException(
+          "Invalid key format - no measurement seperator - " + MEASUREMENT_SEP);
+    }
+    opType = key.substring(0, place);
+    measurementType = key.substring(place + 1);
+    this.value = value;
+  }
+
+  OperationOutput(Text key, Object value) {
+    this(key.toString(), value);
+  }
+
+  OperationOutput(OperationOutput r1) {
+    this.dataType = r1.dataType;
+    this.measurementType = r1.measurementType;
+    this.opType = r1.opType;
+    this.value = r1.value;
+  }
+
+  public String toString() {
+    return getKeyString() + " (" + this.value + ")";
+  }
+
+  OperationOutput(OutputType dataType, String opType, String measurementType,
+      Object value) {
+    this.dataType = dataType;
+    this.opType = opType;
+    this.measurementType = measurementType;
+    this.value = value;
+  }
+
+  /**
+   * Merges according to the documented rules for merging. Will only merge if
+   * the measurement type and operation type are the same.
+   * 
+   * @param o1
+   *          the first object to merge with the second
+   * @param o2
+   *          the second object.
+   * 
+   * @return OperationOutput merged output.
+   * 
+   * @throws IllegalArgumentException
+   *           if unable to merge due to incompatible formats/types
+   */
+  static OperationOutput merge(OperationOutput o1, OperationOutput o2) {
+    if (o1.getMeasurementType().equals(o2.getMeasurementType())
+        && o1.getOperationType().equals(o2.getOperationType())) {
+      Object newvalue = null;
+      OutputType newtype = null;
+      String opType = o1.getOperationType();
+      String mType = o1.getMeasurementType();
+      if (o1.getOutputType() == OutputType.STRING
+          || o2.getOutputType() == OutputType.STRING) {
+        newtype = OutputType.STRING;
+        StringBuilder str = new StringBuilder();
+        str.append(o1.getValue());
+        str.append(STRING_SEP);
+        str.append(o2.getValue());
+        newvalue = str.toString();
+      } else if (o1.getOutputType() == OutputType.DOUBLE
+          || o2.getOutputType() == OutputType.DOUBLE) {
+        newtype = OutputType.DOUBLE;
+        try {
+          newvalue = Double.parseDouble(o1.getValue().toString())
+              + Double.parseDouble(o2.getValue().toString());
+        } catch (NumberFormatException e) {
+          throw new IllegalArgumentException(
+              "Unable to combine a type with a double " + o1 + " & " + o2, e);
+        }
+      } else if (o1.getOutputType() == OutputType.FLOAT
+          || o2.getOutputType() == OutputType.FLOAT) {
+        newtype = OutputType.FLOAT;
+        try {
+          newvalue = Float.parseFloat(o1.getValue().toString())
+              + Float.parseFloat(o2.getValue().toString());
+        } catch (NumberFormatException e) {
+          throw new IllegalArgumentException(
+              "Unable to combine a type with a float " + o1 + " & " + o2, e);
+        }
+      } else if (o1.getOutputType() == OutputType.LONG
+          || o2.getOutputType() == OutputType.LONG) {
+        newtype = OutputType.LONG;
+        try {
+          newvalue = Long.parseLong(o1.getValue().toString())
+              + Long.parseLong(o2.getValue().toString());
+        } catch (NumberFormatException e) {
+          throw new IllegalArgumentException(
+              "Unable to combine a type with a long " + o1 + " & " + o2, e);
+        }
+      } else if (o1.getOutputType() == OutputType.INTEGER
+          || o2.getOutputType() == OutputType.INTEGER) {
+        newtype = OutputType.INTEGER;
+        try {
+          newvalue = Integer.parseInt(o1.getValue().toString())
+              + Integer.parseInt(o2.getValue().toString());
+        } catch (NumberFormatException e) {
+          throw new IllegalArgumentException(
+              "Unable to combine a type with an int " + o1 + " & " + o2, e);
+        }
+      }
+      return new OperationOutput(newtype, opType, mType, newvalue);
+    } else {
+      throw new IllegalArgumentException("Unable to combine dissimilar types "
+          + o1 + " & " + o2);
+    }
+  }
+
+  /**
+   * Formats the key for output
+   * 
+   * @return String
+   */
+  private String getKeyString() {
+    StringBuilder str = new StringBuilder();
+    str.append(getOutputType().name());
+    str.append(TYPE_SEP);
+    str.append(getOperationType());
+    str.append(MEASUREMENT_SEP);
+    str.append(getMeasurementType());
+    return str.toString();
+  }
+
+  /**
+   * Retrieves the key as a Hadoop Text object
+   * 
+   * @return Text text output
+   */
+  Text getKey() {
+    return new Text(getKeyString());
+  }
+
+  /**
+   * Gets the output value in text format
+   * 
+   * @return Text
+   */
+  Text getOutputValue() {
+    StringBuilder valueStr = new StringBuilder();
+    valueStr.append(getValue());
+    return new Text(valueStr.toString());
+  }
+
+  /**
+   * Gets the object that represents this value (expected to match the output
+   * data type)
+   * 
+   * @return Object
+   */
+  Object getValue() {
+    return value;
+  }
+
+  /**
+   * Gets the output data type of this class.
+   * 
+   * @return OutputType
+   */
+  OutputType getOutputType() {
+    return dataType;
+  }
+
+  /**
+   * Gets the operation type this object represents.
+   * 
+   * @return String
+   */
+  String getOperationType() {
+    return opType;
+  }
+
+  /**
+   * Gets the measurement type this object represents.
+   * 
+   * @return String
+   */
+  String getMeasurementType() {
+    return measurementType;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationOutput.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
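
    A small sketch (again, not part of the commit) of the
    dataType:operationType*measurementType key format and the merge rules
    documented above; the "ReadOp"/"successes" names are only placeholders,
    and the sketch assumes compilation inside the org.apache.hadoop.fs.slive
    package since OperationOutput is package-private.

    package org.apache.hadoop.fs.slive;

    import org.apache.hadoop.fs.slive.OperationOutput.OutputType;

    // Illustrative sketch only; not part of the committed patch.
    class OperationOutputMergeExample {
      public static void main(String[] args) {
        // Parsed from a key string: LONG data type, "ReadOp" operation type,
        // "successes" measurement type, value 1.
        OperationOutput o1 = new OperationOutput("long:ReadOp*successes", 1L);
        OperationOutput o2 = new OperationOutput(OutputType.LONG, "ReadOp",
            "successes", 2L);

        // Same operation and measurement type, both LONG, so merging adds them.
        OperationOutput merged = OperationOutput.merge(o1, o2);
        System.out.println(merged); // expected: LONG:ReadOp*successes (3)
      }
    }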

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java Fri May 21 00:02:12 2010
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+/**
+ * Class which holds an operation and its weight (used in operation selection)
+ */
+class OperationWeight {
+  private double weight;
+  private Operation operation;
+
+  OperationWeight(Operation op, double weight) {
+    this.operation = op;
+    this.weight = weight;
+  }
+
+  /**
+   * Fetches this operation's weight
+   * 
+   * @return double
+   */
+  double getWeight() {
+    return weight;
+  }
+
+  /**
+   * Gets the operation
+   * 
+   * @return Operation
+   */
+  Operation getOperation() {
+    return operation;
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/OperationWeight.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
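
    OperationWeight is only a holder; the selection logic that consumes it
    lives elsewhere in this changeset and is not shown here. Purely as an
    illustration of one way such weights could drive a weighted random pick
    (not the project's actual selector), assuming a non-empty list:

    package org.apache.hadoop.fs.slive;

    import java.util.List;
    import java.util.Random;

    // Illustrative sketch only; not part of the committed patch.
    class OperationWeightSelectionExample {
      static Operation pick(List<OperationWeight> weights, Random rnd) {
        double total = 0.0;
        for (OperationWeight ow : weights) {
          total += ow.getWeight();
        }
        // Walk the cumulative weights until the random point is covered.
        double point = rnd.nextDouble() * total;
        double seen = 0.0;
        for (OperationWeight ow : weights) {
          seen += ow.getWeight();
          if (point <= seen) {
            return ow.getOperation();
          }
        }
        // Fall back to the last entry (guards against rounding at the edge).
        return weights.get(weights.size() - 1).getOperation();
      }
    }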

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java Fri May 21 00:02:12 2010
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.util.Random;
+
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Class which generates a file or directory path using a simple random
+ * generation algorithm described in http://issues.apache.org/jira/browse/HDFS-708
+ */
+class PathFinder {
+
+  private static enum Type {
+    FILE, DIRECTORY
+  }
+
+  private static final String DIR_PREFIX = "sl_dir_";
+  private static final String FILE_PREFIX = "sl_file_";
+
+  private Path basePath;
+  private ConfigExtractor config;
+  private Random rnd;
+
+  PathFinder(ConfigExtractor cfg, Random rnd) {
+    this.basePath = cfg.getDataPath();
+    this.config = cfg;
+    this.rnd = rnd;
+  }
+
+  /**
+   * This function uses a simple recursive algorithm to generate a path name:
+   * the current id % limitPerDir selects the current segment's name and the
+   * current id / limitPerDir forms the rest of the tree segments
+   * 
+   * @param curId
+   *          the current id used (modulo the per directory limit) to determine
+   *          the current segment's name and (divided by that limit) to determine
+   *          the parent segments; if <= zero this will return the base path
+   * @param limitPerDir
+   *          the per directory file limit used in modulo and division
+   *          operations to calculate the file name and path tree
+   * @param type
+   *          directory or file enumeration
+   * @return Path the generated path
+   */
+  private Path getPath(int curId, int limitPerDir, Type type) {
+    if (curId <= 0) {
+      return basePath;
+    }
+    String name = "";
+    switch (type) {
+    case FILE:
+      name = FILE_PREFIX + new Integer(curId % limitPerDir).toString();
+      break;
+    case DIRECTORY:
+      name = DIR_PREFIX + new Integer(curId % limitPerDir).toString();
+      break;
+    }
+    Path base = getPath((curId / limitPerDir), limitPerDir, Type.DIRECTORY);
+    return new Path(base, name);
+  }
+
+  /**
+   * Gets a file path using the total files and files per directory provided
+   * by the configuration
+   * 
+   * @return path
+   */
+  Path getFile() {
+    int fileLimit = config.getTotalFiles();
+    int dirLimit = config.getDirSize();
+    int startPoint = 1 + rnd.nextInt(fileLimit);
+    return getPath(startPoint, dirLimit, Type.FILE);
+  }
+
+  /**
+   * Gets a directory path using the total files and files per directory
+   * provided by the configuration
+   * 
+   * @return path
+   */
+  Path getDirectory() {
+    int fileLimit = config.getTotalFiles();
+    int dirLimit = config.getDirSize();
+    int startPoint = rnd.nextInt(fileLimit);
+    return getPath(startPoint, dirLimit, Type.DIRECTORY);
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/PathFinder.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
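
    To make the modulo/division walk in getPath above easier to follow, here
    is a standalone restatement (not part of the commit) with a worked
    example; the base path is made up for illustration, while the sl_dir_
    and sl_file_ prefixes match the constants above.

    package org.apache.hadoop.fs.slive;

    import org.apache.hadoop.fs.Path;

    // Illustrative restatement only; not part of the committed patch.
    class PathFinderWalkExample {
      static Path filePath(Path base, int curId, int limitPerDir) {
        if (curId <= 0) {
          return base;
        }
        // Parent directories come from repeatedly dividing the id down.
        Path parent = dirPath(base, curId / limitPerDir, limitPerDir);
        return new Path(parent, "sl_file_" + (curId % limitPerDir));
      }

      static Path dirPath(Path base, int curId, int limitPerDir) {
        if (curId <= 0) {
          return base;
        }
        Path parent = dirPath(base, curId / limitPerDir, limitPerDir);
        return new Path(parent, "sl_dir_" + (curId % limitPerDir));
      }

      public static void main(String[] args) {
        // With id 27 and 5 files per directory:
        // 27 % 5 = 2 (file), 27 / 5 = 5 -> 5 % 5 = 0 (dir), 5 / 5 = 1 -> 1 % 5 = 1 (dir)
        // giving <base>/sl_dir_1/sl_dir_0/sl_file_2
        System.out.println(filePath(new Path("/slive/data"), 27, 5));
      }
    }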

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Range.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Range.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Range.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Range.java Fri May 21 00:02:12 2010
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.util.Random;
+
+/**
+ * Class that represents a numeric minimum and a maximum
+ * 
+ * @param <T>
+ *          the type of number being used
+ */
+class Range<T extends Number> {
+
+  private static final String SEP = ",";
+
+  private T min;
+  private T max;
+
+  Range(T min, T max) {
+    this.min = min;
+    this.max = max;
+  }
+
+  /**
+   * @return the minimum value
+   */
+  T getLower() {
+    return min;
+  }
+
+  /**
+   * @return the maximum value
+   */
+  T getUpper() {
+    return max;
+  }
+
+  public String toString() {
+    return min + SEP + max;
+  }
+
+  /**
+   * Gets a random long between the given range's lower and upper bound (inclusive)
+   * 
+   * @param rnd the random number generator to use
+   * @param range the range supplying the lower and upper bound
+   * 
+   * @return long
+   */
+  static long betweenPositive(Random rnd, Range<Long> range) {
+    if (range.getLower().equals(range.getUpper())) {
+      return range.getLower();
+    }
+    long nextRnd = rnd.nextLong();
+    long normRange = (range.getUpper() - range.getLower() + 1);
+    return Math.abs(nextRnd % normRange) + range.getLower();
+  }
+
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/Range.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain
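
    A short sketch (not part of the commit) exercising Range.betweenPositive
    above, assuming it compiles inside the org.apache.hadoop.fs.slive package
    since Range is package-private; the bounds are arbitrary.

    package org.apache.hadoop.fs.slive;

    import java.util.Random;

    // Illustrative sketch only; not part of the committed patch.
    class RangeExample {
      public static void main(String[] args) {
        Range<Long> readSizes = new Range<Long>(1024L, 4096L);
        Random rnd = new Random(42L);
        // Each call returns a value in [1024, 4096].
        for (int i = 0; i < 3; i++) {
          System.out.println(Range.betweenPositive(rnd, readSizes));
        }
        // Equal bounds short-circuit and return the lower value.
        System.out.println(Range.betweenPositive(rnd, new Range<Long>(7L, 7L))); // 7
      }
    }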

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java Fri May 21 00:02:12 2010
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.DataInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.slive.DataVerifier.VerifyOutput;
+import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+
+/**
+ * Operation which selects a random file and selects a random read size (from
+ * the read size option) and reads from the start of that file to the read size
+ * (or the full file) and verifies the bytes that were written there.
+ * 
+ * This operation will capture statistics on success the time taken to read that
+ * file and the number of successful readings that occurred as well as the
+ * number of bytes read and the number of chunks verified and the number of
+ * chunks which failed verification and on failure or error it will capture the
+ * number of failures and the amount of time taken to fail
+ */
+class ReadOp extends Operation {
+  private static final Log LOG = LogFactory.getLog(ReadOp.class);
+
+  ReadOp(ConfigExtractor cfg, Random rnd) {
+    super(ReadOp.class.getSimpleName(), cfg, rnd);
+  }
+
+  /**
+   * Gets the file name to read
+   * 
+   * @return Path
+   */
+  protected Path getReadFile() {
+    Path fn = getFinder().getFile();
+    return fn;
+  }
+
+  @Override // Operation
+  List<OperationOutput> run(FileSystem fs) {
+    List<OperationOutput> out = super.run(fs);
+    DataInputStream is = null;
+    try {
+      Path fn = getReadFile();
+      Range<Long> readSizeRange = getConfig().getReadSize();
+      long readSize = 0;
+      String readStrAm = "";
+      if (getConfig().shouldReadFullFile()) {
+        readSize = Long.MAX_VALUE;
+        readStrAm = "full file";
+      } else {
+        readSize = Range.betweenPositive(getRandom(), readSizeRange);
+        readStrAm = Helper.toByteInfo(readSize);
+      }
+      long timeTaken = 0;
+      long chunkSame = 0;
+      long chunkDiff = 0;
+      long bytesRead = 0;
+      long startTime = 0;
+      DataVerifier vf = new DataVerifier();
+      LOG.info("Attempting to read file at " + fn + " of size (" + readStrAm
+          + ")");
+      {
+        // open
+        startTime = Timer.now();
+        is = fs.open(fn);
+        timeTaken += Timer.elapsed(startTime);
+        // read & verify
+        VerifyOutput vo = vf.verifyFile(readSize, is);
+        timeTaken += vo.getReadTime();
+        chunkSame += vo.getChunksSame();
+        chunkDiff += vo.getChunksDifferent();
+        bytesRead += vo.getBytesRead();
+        // capture close time
+        startTime = Timer.now();
+        is.close();
+        is = null;
+        timeTaken += Timer.elapsed(startTime);
+      }
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.OK_TIME_TAKEN, timeTaken));
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.BYTES_READ, bytesRead));
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.SUCCESSES, 1L));
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.CHUNKS_VERIFIED, chunkSame));
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.CHUNKS_UNVERIFIED, chunkDiff));
+      LOG.info("Read " + Helper.toByteInfo(bytesRead) + " of " + fn + " with "
+          + chunkSame + " chunks being same as expected and " + chunkDiff
+          + " chunks being different than expected in " + timeTaken
+          + " milliseconds");
+
+    } catch (FileNotFoundException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.NOT_FOUND, 1L));
+      LOG.warn("Error with reading", e);
+    } catch (BadFileException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.BAD_FILES, 1L));
+      LOG.warn("Error reading bad file", e);
+    } catch (IOException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.FAILURES, 1L));
+      LOG.warn("Error reading", e);
+    } finally {
+      if (is != null) {
+        try {
+          is.close();
+        } catch (IOException e) {
+          LOG.warn("Error closing read stream", e);
+        }
+      }
+    }
+    return out;
+  }
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/ReadOp.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

Added: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java?rev=946832&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java (added)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java Fri May 21 00:02:12 2010
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.slive;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+
+/**
+ * Operation which selects two random files and attempts to rename the first
+ * file to the second file.
+ * 
+ * On success this operation will capture statistics on the time taken to rename
+ * those files and the number of successful renames that occurred; on failure or
+ * error it will capture the number of failures and the amount of time taken to
+ * fail
+ */
+class RenameOp extends Operation {
+
+  /**
+   * Class that holds the src and target for renames
+   */
+  protected static class SrcTarget {
+    private Path src, target;
+
+    SrcTarget(Path src, Path target) {
+      this.src = src;
+      this.target = target;
+    }
+
+    Path getSrc() {
+      return src;
+    }
+
+    Path getTarget() {
+      return target;
+    }
+  }
+
+  private static final Log LOG = LogFactory.getLog(RenameOp.class);
+
+  RenameOp(ConfigExtractor cfg, Random rnd) {
+    super(RenameOp.class.getSimpleName(), cfg, rnd);
+  }
+
+  /**
+   * Gets the file names to rename
+   * 
+   * @return SrcTarget
+   */
+  protected SrcTarget getRenames() {
+    Path src = getFinder().getFile();
+    Path target = getFinder().getFile();
+    return new SrcTarget(src, target);
+  }
+
+  @Override // Operation
+  List<OperationOutput> run(FileSystem fs) {
+    List<OperationOutput> out = super.run(fs);
+    try {
+      // find the files to modify
+      SrcTarget targets = getRenames();
+      Path src = targets.getSrc();
+      Path target = targets.getTarget();
+      // capture results
+      boolean renamedOk = false;
+      long timeTaken = 0;
+      {
+        // rename it
+        long startTime = Timer.now();
+        renamedOk = fs.rename(src, target);
+        timeTaken = Timer.elapsed(startTime);
+      }
+      if (renamedOk) {
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.OK_TIME_TAKEN, timeTaken));
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.SUCCESSES, 1L));
+        LOG.info("Renamed " + src + " to " + target);
+      } else {
+        out.add(new OperationOutput(OutputType.LONG, getType(),
+            ReportWriter.FAILURES, 1L));
+        LOG.warn("Could not rename " + src + " to " + target);
+      }
+    } catch (FileNotFoundException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.NOT_FOUND, 1L));
+      LOG.warn("Error with renaming", e);
+    } catch (IOException e) {
+      out.add(new OperationOutput(OutputType.LONG, getType(),
+          ReportWriter.FAILURES, 1L));
+      LOG.warn("Error with renaming", e);
+    }
+    return out;
+  }
+}

Propchange: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs/slive/RenameOp.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain


