From: tomwhite@apache.org
To: hadoop-commits@lucene.apache.org
Reply-To: hadoop-dev@lucene.apache.org
Mailing-List: contact hadoop-commits-help@lucene.apache.org; run by ezmlm
Subject: svn commit: r529744 - in /lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs: DFSClient.java DataNode.java FSDirectory.java FSNamesystem.java JspHelper.java NamenodeFsck.java SecondaryNameNode.java
Date: Tue, 17 Apr 2007 19:58:34 -0000
Message-Id: <20070417195834.B21511A9838@eris.apache.org>
X-Mailer: svnmailer-1.1.0

Author: tomwhite
Date: Tue Apr 17 12:58:32 2007
New Revision: 529744

URL: http://svn.apache.org/viewvc?view=rev&rev=529744
Log:
HADOOP-1190. Fix unchecked warnings in dfs package.

Modified:
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Tue Apr 17 12:58:32 2007
@@ -59,7 +59,8 @@
    * A map from name -> DFSOutputStream of files that are currently being
    * written by this client.
    */
-  private TreeMap pendingCreates = new TreeMap();
+  private TreeMap<UTF8, OutputStream> pendingCreates =
+    new TreeMap<UTF8, OutputStream>();
 
   /**
    * A class to track the list of DFS clients, so that they can be closed
@@ -67,16 +68,14 @@
    * @author Owen O'Malley
    */
   private static class ClientFinalizer extends Thread {
-    private List clients = new ArrayList();
+    private List<DFSClient> clients = new ArrayList<DFSClient>();
 
     public synchronized void addClient(DFSClient client) {
       clients.add(client);
     }
 
     public synchronized void run() {
-      Iterator itr = clients.iterator();
-      while (itr.hasNext()) {
-        DFSClient client = (DFSClient) itr.next();
+      for (DFSClient client : clients) {
         if (client.running) {
           try {
             client.close();
@@ -529,13 +528,13 @@
       Block oldBlocks[] = this.blocks;
 
       LocatedBlock results[] = namenode.open(src);
-      Vector blockV = new Vector();
-      Vector nodeV = new Vector();
+      Vector<Block> blockV = new Vector<Block>();
+      Vector<DatanodeInfo[]> nodeV = new Vector<DatanodeInfo[]>();
       for (int i = 0; i < results.length; i++) {
         blockV.add(results[i].getBlock());
         nodeV.add(results[i].getLocations());
       }
-      Block newBlocks[] = (Block[]) blockV.toArray(new Block[blockV.size()]);
+      Block[] newBlocks = blockV.toArray(new Block[blockV.size()]);
 
       if (oldBlocks != null) {
         for (int i = 0; i < oldBlocks.length; i++) {
@@ -548,7 +547,7 @@
         }
       }
       this.blocks = newBlocks;
-      this.nodes = (DatanodeInfo[][]) nodeV.toArray(new DatanodeInfo[nodeV.size()][]);
+      this.nodes = nodeV.toArray(new DatanodeInfo[nodeV.size()][]);
       this.currentNode = null;
     }
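Most hunks in this commit follow the shape of the ClientFinalizer change above: once the collection is parameterized, an enhanced for loop replaces the Iterator-plus-cast idiom and the compiler checks element types. A minimal compilable sketch of the pattern, with hypothetical names and String standing in for DFSClient:

    import java.util.ArrayList;
    import java.util.List;

    public class ClientRegistry {
        // The raw form, "private List clients = new ArrayList();",
        // compiles but defers every type error to a runtime cast.
        private final List<String> clients = new ArrayList<String>();

        void addClient(String client) {
            clients.add(client);
        }

        void closeAll() {
            // No Iterator and no (String) cast on a parameterized list.
            for (String client : clients) {
                System.out.println("closing " + client);
            }
        }

        public static void main(String[] args) {
            ClientRegistry r = new ClientRegistry();
            r.addClient("client-1");
            r.closeAll();
        }
    }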
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Tue Apr 17 12:58:32 2007
@@ -111,7 +111,7 @@
   DatanodeRegistration dnRegistration = null;
   private String networkLoc;
   volatile boolean shouldRun = true;
-  Vector receivedBlockList = new Vector();
+  Vector<Block> receivedBlockList = new Vector<Block>();
   int xmitsInProgress = 0;
   Daemon dataXceiveServer = null;
   long blockReportInterval;
@@ -456,7 +456,7 @@
           //
           // Send newly-received blockids to namenode
           //
-          blockArray = (Block[]) receivedBlockList.toArray(new Block[receivedBlockList.size()]);
+          blockArray = receivedBlockList.toArray(new Block[receivedBlockList.size()]);
         }
       }
       if( blockArray != null ) {
@@ -799,7 +799,7 @@
       //
       // Track all the places we've successfully written the block
       //
-      Vector mirrors = new Vector();
+      Vector<DatanodeInfo> mirrors = new Vector<DatanodeInfo>();
 
       //
       // Open local disk out
@@ -998,7 +998,7 @@
         //
         reply.writeLong(WRITE_COMPLETE);
         mirrors.add(curTarget);
-        LocatedBlock newLB = new LocatedBlock(b, (DatanodeInfo[]) mirrors.toArray(new DatanodeInfo[mirrors.size()]));
+        LocatedBlock newLB = new LocatedBlock(b, mirrors.toArray(new DatanodeInfo[mirrors.size()]));
         newLB.write(reply);
       } finally {
         reply.close();
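The DataNode hunks lean on the typed Collection.toArray(T[]) overload: on a parameterized collection it returns T[] directly, which is why the (Block[]) and (DatanodeInfo[]) casts can be dropped. A small sketch, with hypothetical names and String in place of Block:

    import java.util.Vector;

    public class ToArrayDemo {
        public static void main(String[] args) {
            Vector<String> receivedBlockList = new Vector<String>();
            receivedBlockList.add("blk_1");
            receivedBlockList.add("blk_2");

            // The raw form needed a cast:
            //   (String[]) receivedBlockList.toArray(new String[...]);
            // the parameterized form is checked at compile time instead.
            String[] blockArray =
                receivedBlockList.toArray(new String[receivedBlockList.size()]);
            System.out.println(blockArray.length + " blocks");
        }
    }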
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java Tue Apr 17 12:58:32 2007
@@ -135,7 +135,7 @@
       } else if (parent == null && "/".equals(target)) {
         return this;
       } else {
-        Vector components = new Vector();
+        Vector<String> components = new Vector<String>();
         int start = 0;
         int slashid = 0;
         while (start < target.length() && (slashid = target.indexOf('/', start)) >= 0) {
@@ -151,8 +151,8 @@
 
     /**
      */
-    INode getNode(Vector components, int index) {
-      if (! name.equals((String) components.elementAt(index))) {
+    INode getNode(Vector<String> components, int index) {
+      if (! name.equals(components.elementAt(index))) {
         return null;
       }
       if (index == components.size()-1) {
@@ -160,7 +160,7 @@
       }
 
       // Check with children
-      INode child = this.getChild((String)components.elementAt(index+1));
+      INode child = this.getChild(components.elementAt(index+1));
       if (child == null) {
         return null;
       } else {
@@ -225,7 +225,7 @@
      * This operation is performed after a node is removed from the tree,
      * and we want to GC all the blocks at this node and below.
      */
-    void collectSubtreeBlocks(Vector v) {
+    void collectSubtreeBlocks(Vector<Block> v) {
       if (blocks != null) {
         for (int i = 0; i < blocks.length; i++) {
           v.add(blocks[i]);
@@ -296,7 +296,7 @@
 
     /**
      */
-    void listContents(Vector v) {
+    void listContents(Vector<INode> v) {
       if (parent != null && blocks != null) {
         v.add(this);
       }
@@ -310,7 +310,8 @@
 
   FSNamesystem namesystem = null;
   INode rootDir = new INode("");
-  TreeMap activeLocks = new TreeMap();
+  TreeMap<UTF8, TreeSet<UTF8>> activeLocks =
+    new TreeMap<UTF8, TreeSet<UTF8>>();
   FSImage fsImage;
   boolean ready = false;
   // Metrics record
@@ -498,7 +499,7 @@
    */
   Block[] setReplication( String src,
                           short replication,
-                          Vector oldReplication
+                          Vector<Integer> oldReplication
                         ) throws IOException {
     waitForReady();
     Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication );
@@ -509,10 +510,10 @@
   Block[] unprotectedSetReplication( String src,
                                      short replication,
-                                     Vector oldReplication
+                                     Vector<Integer> oldReplication
                                    ) throws IOException {
     if( oldReplication == null )
-      oldReplication = new Vector();
+      oldReplication = new Vector<Integer>();
     oldReplication.setSize(1);
     oldReplication.set( 0, new Integer(-1) );
     Block[] fileBlocks = null;
@@ -583,13 +584,12 @@
       } else {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
             +src+" is removed" );
-        Vector v = new Vector();
+        Vector<Block> v = new Vector<Block>();
         targetNode.collectSubtreeBlocks(v);
-        for (Iterator it = v.iterator(); it.hasNext(); ) {
-          Block b = (Block) it.next();
+        for (Block b : v) {
          namesystem.blocksMap.removeINode(b);
         }
-        return (Block[]) v.toArray(new Block[v.size()]);
+        return v.toArray(new Block[v.size()]);
       }
     }
   }
@@ -598,9 +598,9 @@
   /**
    */
   public int obtainLock(UTF8 src, UTF8 holder, boolean exclusive) {
-    TreeSet holders = (TreeSet) activeLocks.get(src);
+    TreeSet<UTF8> holders = activeLocks.get(src);
     if (holders == null) {
-      holders = new TreeSet();
+      holders = new TreeSet<UTF8>();
       activeLocks.put(src, holders);
     }
     if (exclusive && holders.size() > 0) {
@@ -640,13 +640,13 @@
     if (targetNode == null) {
       return null;
     } else {
-      Vector contents = new Vector();
+      Vector<INode> contents = new Vector<INode>();
       targetNode.listContents(contents);
 
       DFSFileInfo listing[] = new DFSFileInfo[contents.size()];
       int i = 0;
-      for (Iterator it = contents.iterator(); it.hasNext(); i++) {
-        listing[i] = new DFSFileInfo( (INode) it.next() );
+      for (Iterator<INode> it = contents.iterator(); it.hasNext(); i++) {
+        listing[i] = new DFSFileInfo(it.next());
       }
       return listing;
     }
@@ -701,7 +701,7 @@
     src = normalizePath(new UTF8(src));
 
     // Use this to collect all the dirs we need to construct
-    Vector v = new Vector();
+    Vector<String> v = new Vector<String>();
 
     // The dir itself
     v.add(src);
@@ -717,7 +717,7 @@
     // the way
     int numElts = v.size();
     for (int i = numElts - 1; i >= 0; i--) {
-      String cur = (String) v.elementAt(i);
+      String cur = v.elementAt(i);
       try {
         INode inserted = unprotectedMkdir(cur);
         if (inserted != null) {
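The activeLocks change shows the value of nested type parameters: TreeMap<UTF8, TreeSet<UTF8>> documents the path-to-lock-holders structure that the raw TreeMap left implicit, and get() then needs no cast. A simplified, self-contained sketch of the same obtainLock shape, with String standing in for UTF8 and a hypothetical boolean result:

    import java.util.TreeMap;
    import java.util.TreeSet;

    public class LockTable {
        private final TreeMap<String, TreeSet<String>> activeLocks =
            new TreeMap<String, TreeSet<String>>();

        // Returns true if the lock was granted.
        public synchronized boolean obtainLock(String src, String holder,
                                               boolean exclusive) {
            TreeSet<String> holders = activeLocks.get(src); // no (TreeSet) cast
            if (holders == null) {
                holders = new TreeSet<String>();
                activeLocks.put(src, holders);
            }
            if (exclusive && !holders.isEmpty()) {
                return false;
            }
            return holders.add(holder);
        }
    }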
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Tue Apr 17 12:58:32 2007
@@ -412,12 +412,12 @@
    */
   private class UnderReplicatedBlocks {
     private static final int LEVEL = 3;
-    TreeSet[] priorityQueues = new TreeSet[LEVEL];
+    List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
 
     /* constructor */
     UnderReplicatedBlocks() {
       for(int i=0; i<LEVEL; i++) {
-        priorityQueues[i] = new TreeSet();
+        priorityQueues.add(new TreeSet<Block>());
       }
     }
 
@@ -425,7 +425,7 @@
     synchronized int size() {
       int size = 0;
       for( int i=0; i<LEVEL; i++ ) {
-        size += priorityQueues[i].size();
+        size += priorityQueues.get(i).size();
       }
       return size;
     }
@@ ... @@
       if( priLevel >= 0 && priLevel < LEVEL
-          && priorityQueues[priLevel].remove(block) ) {
+          && priorityQueues.get(priLevel).remove(block) ) {
         NameNode.stateChangeLog.debug(
           "BLOCK* NameSystem.UnderReplicationBlock.remove: "
           + "Removing block " + block.getBlockName()
@@ -506,7 +506,7 @@
           return true;
         } else {
           for(int i=0; i<LEVEL; i++) {
-            if( i!=priLevel && priorityQueues[i].remove(block) ) {
+            if( i!=priLevel && priorityQueues.get(i).remove(block) ) {
@@ ... @@
     public synchronized Iterator<Block> iterator() {
       return new Iterator<Block>() {
         int level;
-        Iterator[] iterator = new Iterator[LEVEL];
+        List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>();
         {
           level=0;
           for(int i=0; i<LEVEL; i++) {
-            iterator[i] = priorityQueues[i].iterator();
+            iterators.add(priorityQueues.get(i).iterator());
           }
         }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java Tue Apr 17 12:58:32 2007
@@ ... @@
-    TreeSet deadNodes = new TreeSet();
+    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
     DatanodeInfo chosenNode = null;
     int failures = 0;
     Socket s = null;
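The priorityQueues hunk is the one place where the fix is structural rather than cosmetic: Java forbids creating an array of a parameterized type (new TreeSet<Block>[LEVEL] does not compile), and a raw new TreeSet[LEVEL] keeps the unchecked warning, so a List of sets is the standard substitute. A compilable sketch of that substitution, with String elements standing in for Block:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.TreeSet;

    public class PriorityQueues {
        private static final int LEVEL = 3;

        // "new TreeSet<String>[LEVEL]" is a compile error and
        // "new TreeSet[LEVEL]" an unchecked warning; a List avoids both.
        private final List<TreeSet<String>> priorityQueues =
            new ArrayList<TreeSet<String>>();

        PriorityQueues() {
            for (int i = 0; i < LEVEL; i++) {
                priorityQueues.add(new TreeSet<String>());
            }
        }

        int size() {
            int size = 0;
            for (int i = 0; i < LEVEL; i++) {
                size += priorityQueues.get(i).size(); // get(i) replaces [i]
            }
            return size;
        }
    }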
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Tue Apr 17 12:58:32 2007
@@ -308,7 +308,7 @@
                              OutputStream fos) throws Exception {
     int failures = 0;
     InetSocketAddress targetAddr = null;
-    TreeSet deadNodes = new TreeSet();
+    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
     Socket s = null;
     DataInputStream in = null;
     DataOutputStream out = null;
@@ -400,7 +400,7 @@
    */
   Random r = new Random();
   private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
-                                TreeSet deadNodes) throws IOException {
+                                TreeSet<DatanodeInfo> deadNodes) throws IOException {
     if ((nodes == null) ||
         (nodes.length - deadNodes.size() < 1)) {
       throw new IOException("No live nodes contain current block");
@@ -463,7 +463,7 @@
    * @author Andrzej Bialecki
    */
   public class FsckResult {
-    private ArrayList missingIds = new ArrayList();
+    private ArrayList<String> missingIds = new ArrayList<String>();
     private long missingSize = 0L;
     private long corruptFiles = 0L;
     private long overReplicatedBlocks = 0L;
@@ -490,7 +490,7 @@
     }
 
     /** Return a list of missing block names (as list of Strings).
      */
-    public ArrayList getMissingIds() {
+    public ArrayList<String> getMissingIds() {
       return missingIds;
     }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java?view=diff&rev=529744&r1=529743&r2=529744
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java Tue Apr 17 12:58:32 2007
@@ -425,6 +425,7 @@
    * @author Dhruba Borthakur
    */
   public static class GetImageServlet extends HttpServlet {
+    @SuppressWarnings("unchecked")
     public void doGet(HttpServletRequest request,
                       HttpServletResponse response
                       ) throws ServletException, IOException {
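SecondaryNameNode gets @SuppressWarnings("unchecked") rather than a type parameter, the usual escape hatch when the warning originates in an API outside the patch's control, presumably the pre-generics servlet API in this doGet. A generic illustration of confining such a warning to one small method, with hypothetical names:

    import java.util.ArrayList;
    import java.util.List;

    public class LegacyBridge {
        // Stands in for a pre-generics API that can only return a raw List.
        static List legacyList() {
            List l = new ArrayList();
            l.add("one");
            return l;
        }

        // The unchecked conversion is unavoidable without changing the
        // legacy API, so the annotation scopes the warning to this method.
        @SuppressWarnings("unchecked")
        static List<String> typedView() {
            return (List<String>) legacyList();
        }

        public static void main(String[] args) {
            System.out.println(typedView().get(0));
        }
    }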