Index: src/java/org/apache/nutch/fetcher/Fetcher2.java
===================================================================
--- src/java/org/apache/nutch/fetcher/Fetcher2.java	(revision 562509)
+++ src/java/org/apache/nutch/fetcher/Fetcher2.java	(working copy)
@@ -342,9 +342,10 @@
     }
     
     public synchronized FetchItem getFetchItem() {
-      Iterator it = queues.keySet().iterator();
+      Iterator<Map.Entry<String, FetchItemQueue>> it = 
+        queues.entrySet().iterator();
       while (it.hasNext()) {
-        FetchItemQueue fiq = queues.get(it.next());
+        FetchItemQueue fiq = it.next().getValue();
         // reap empty queues
         if (fiq.getQueueSize() == 0 && fiq.getInProgressSize() == 0) {
           it.remove();
Index: src/java/org/apache/nutch/analysis/TokenMgrError.java
===================================================================
--- src/java/org/apache/nutch/analysis/TokenMgrError.java	(revision 562509)
+++ src/java/org/apache/nutch/analysis/TokenMgrError.java	(working copy)
@@ -1,6 +1,7 @@
 /* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 3.0 */
 package org.apache.nutch.analysis;
 
+@SuppressWarnings("serial")
 class TokenMgrError extends Error
 {
    /*
Index: src/java/org/apache/nutch/analysis/CommonGrams.java
===================================================================
--- src/java/org/apache/nutch/analysis/CommonGrams.java	(revision 562509)
+++ src/java/org/apache/nutch/analysis/CommonGrams.java	(working copy)
@@ -42,7 +42,8 @@
   /** The key used to cache commonTerms in Configuration */
   private static final String KEY = CommonGrams.class.getName();
 
-  private HashMap commonTerms = new HashMap();
+  private HashMap<String, HashSet<String>> commonTerms = 
+    new HashMap<String, HashSet<String>>();
   
   /**
    * The constructor.
@@ -53,14 +54,14 @@
   }
 
   private static class Filter extends TokenFilter {
-    private HashSet common;
+    private HashSet<String> common;
     private Token previous;
-    private LinkedList gramQueue = new LinkedList();
-    private LinkedList nextQueue = new LinkedList();
+    private LinkedList<Token> gramQueue = new LinkedList<Token>();
+    private LinkedList<Token> nextQueue = new LinkedList<Token>();
     private StringBuffer buffer = new StringBuffer();
 
     /** Construct an n-gram producing filter. */
-    public Filter(TokenStream input, HashSet common) {
+    public Filter(TokenStream input, HashSet<String> common) {
       super(input);
       this.common = common;
     }
@@ -68,7 +69,7 @@
     /** Inserts n-grams into a token stream. */
     public Token next() throws IOException {
       if (gramQueue.size() != 0)                  // consume any queued tokens
-        return (Token)gramQueue.removeFirst();
+        return gramQueue.removeFirst();
 
       final Token token = popNext();
       if (token == null)
@@ -81,7 +82,7 @@
 
       gramQueue.add(token);                       // queue the token
 
-      ListIterator i = nextQueue.listIterator();
+      ListIterator<Token> i = nextQueue.listIterator();
       Token gram = token;
       while (isCommon(gram)) {
         if (previous != null && !isCommon(previous)) // queue prev gram first
@@ -96,7 +97,7 @@
       }
 
       previous = token;
-      return (Token)gramQueue.removeFirst();
+      return gramQueue.removeFirst();
     }
 
     /** True iff token is for a common term. */
@@ -107,13 +108,13 @@
     /** Pops nextQueue or, if empty, reads a new token. */
     private Token popNext() throws IOException {
       if (nextQueue.size() > 0)
-        return (Token)nextQueue.removeFirst();
+        return nextQueue.removeFirst();
       else
         return input.next();
     }
 
     /** Return next token in nextQueue, extending it when empty. */
-    private Token peekNext(ListIterator i) throws IOException {
+    private Token peekNext(ListIterator<Token> i) throws IOException {
       if (!i.hasNext()) {
         Token next = input.next();
         if (next == null)
@@ -121,7 +122,7 @@
         i.add(next);
         i.previous();
       }
-      return (Token)i.next();
+      return i.next();
     }
 
     /** Construct a compound token. */
@@ -141,12 +142,12 @@
   /** Construct using the provided config file. */
   private void init(Configuration conf) {
     // First, try to retrieve some commonTerms cached in configuration.
-    commonTerms = (HashMap) conf.getObject(KEY);
+    commonTerms = (HashMap<String, HashSet<String>>) conf.getObject(KEY);
     if (commonTerms != null) { return; }
 
     // Otherwise, read the terms.file
     try {
-      commonTerms = new HashMap();
+      commonTerms = new HashMap<String, HashSet<String>>();
       Reader reader = conf.getConfResourceAsReader
         (conf.get("analysis.common.terms.file"));
       BufferedReader in = new BufferedReader(reader);
@@ -175,9 +176,9 @@
         while ((token = ts.next()) != null) {
           gram = gram + SEPARATOR + token.termText();
         }
-        HashSet table = (HashSet)commonTerms.get(field);
+        HashSet<String> table = commonTerms.get(field);
         if (table == null) {
-          table = new HashSet();
+          table = new HashSet<String>();
           commonTerms.put(field, table);
         }
         table.add(gram);
@@ -191,7 +192,7 @@
   /** Construct a token filter that inserts n-grams for common terms.  For use
    * while indexing documents.  */
   public TokenFilter getFilter(TokenStream ts, String field) {
-    return new Filter(ts, (HashSet)commonTerms.get(field));
+    return new Filter(ts, commonTerms.get(field));
   }
 
   /** Utility to convert an array of Query.Terms into a token stream. */
@@ -217,7 +218,7 @@
     if (LOG.isTraceEnabled()) {
       LOG.trace("Optimizing " + phrase + " for " + field);
     }
-    ArrayList result = new ArrayList();
+    ArrayList<String> result = new ArrayList<String>();
     TokenStream ts = getFilter(new ArrayTokens(phrase), field);
     Token token, prev=null;
     int position = 0;
@@ -236,7 +237,7 @@
     if (prev != null)
       result.add(prev.termText());
 
-    return (String[])result.toArray(new String[result.size()]);
+    return result.toArray(new String[result.size()]);
   }
 
   private int arity(String gram) {
Index: src/java/org/apache/nutch/analysis/NutchAnalysis.java
===================================================================
--- src/java/org/apache/nutch/analysis/NutchAnalysis.java	(revision 562509)
+++ src/java/org/apache/nutch/analysis/NutchAnalysis.java	(working copy)
@@ -72,7 +72,7 @@
 /** Parse a query. */
   final public Query parse(Configuration conf) throws ParseException {
   Query query = new Query(conf);
-  ArrayList terms;
+  ArrayList<String> terms;
   Token token;
   String field;
   boolean stop;
@@ -140,7 +140,7 @@
         throw new ParseException();
       }
       nonOpOrTerm();
-      String[] array = (String[])terms.toArray(new String[terms.size()]);
+      String[] array = terms.toArray(new String[terms.size()]);
 
       if (stop
           && field == Clause.DEFAULT_FIELD
@@ -160,10 +160,10 @@
 
 /** Parse an explcitly quoted phrase query.  Note that this may return a single
  * term, a trivial phrase.*/
-  final public ArrayList phrase(String field) throws ParseException {
+  final public ArrayList<String> phrase(String field) throws ParseException {
   int start;
   int end;
-  ArrayList result = new ArrayList();
+  ArrayList<String> result = new ArrayList<String>();
   String term;
     jj_consume_token(QUOTE);
     start = token.endColumn;
@@ -244,9 +244,9 @@
 /** Parse a compound term that is interpreted as an implicit phrase query.
  * Compounds are a sequence of terms separated by infix characters.  Note that
  * htis may return a single term, a trivial compound. */
-  final public ArrayList compound(String field) throws ParseException {
+  final public ArrayList<String> compound(String field) throws ParseException {
   int start;
-  ArrayList result = new ArrayList();
+  ArrayList<String> result = new ArrayList<String>();
   String term;
   StringBuffer terms = new StringBuffer();
     start = token.endColumn;
@@ -830,7 +830,7 @@
       return (jj_ntk = jj_nt.kind);
   }
 
-  private java.util.Vector jj_expentries = new java.util.Vector();
+  private java.util.Vector<int[]> jj_expentries = new java.util.Vector<int[]>();
   private int[] jj_expentry;
   private int jj_kind = -1;
   private int[] jj_lasttokens = new int[100];
@@ -846,8 +846,8 @@
         jj_expentry[i] = jj_lasttokens[i];
       }
       boolean exists = false;
-      for (java.util.Enumeration e = jj_expentries.elements(); e.hasMoreElements();) {
-        int[] oldentry = (int[])(e.nextElement());
+      for (java.util.Enumeration<int[]> e = jj_expentries.elements(); e.hasMoreElements();) {
+        int[] oldentry = e.nextElement();
         if (oldentry.length == jj_expentry.length) {
           exists = true;
           for (int i = 0; i < jj_expentry.length; i++) {
@@ -895,7 +895,7 @@
     jj_add_error_token(0, 0);
     int[][] exptokseq = new int[jj_expentries.size()][];
     for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = (int[])jj_expentries.elementAt(i);
+      exptokseq[i] = jj_expentries.elementAt(i);
     }
     return new ParseException(token, exptokseq, tokenImage);
   }
Index: src/java/org/apache/nutch/analysis/NutchDocumentAnalyzer.java
===================================================================
--- src/java/org/apache/nutch/analysis/NutchDocumentAnalyzer.java	(revision 562509)
+++ src/java/org/apache/nutch/analysis/NutchDocumentAnalyzer.java	(working copy)
@@ -45,7 +45,6 @@
   public static final int INTER_ANCHOR_GAP = 4;
   /** Analyzer used to analyze anchors. */
   private static Analyzer ANCHOR_ANALYZER;
-  private Configuration conf;
 
   /**
    * @param conf
Index: src/java/org/apache/nutch/analysis/ParseException.java
===================================================================
--- src/java/org/apache/nutch/analysis/ParseException.java	(revision 562509)
+++ src/java/org/apache/nutch/analysis/ParseException.java	(working copy)
@@ -10,6 +10,7 @@
  * You can modify this class to customize your error reporting
  * mechanisms so long as you retain the public fields.
  */
+@SuppressWarnings("serial")
 class ParseException extends java.io.IOException  {
 
   /**
Index: src/java/org/apache/nutch/servlet/Cached.java
===================================================================
--- src/java/org/apache/nutch/servlet/Cached.java	(revision 562509)
+++ src/java/org/apache/nutch/servlet/Cached.java	(working copy)
@@ -43,6 +43,7 @@
  * @author John Xing
  */
 
+@SuppressWarnings("serial")
 public class Cached extends HttpServlet {
 
   NutchBean bean = null;
@@ -71,8 +72,8 @@
     if (bean == null)
       return;
 
-    if (bean.LOG.isInfoEnabled()) {
-      bean.LOG.info("request from " + request.getRemoteAddr());
+    if (NutchBean.LOG.isInfoEnabled()) {
+      NutchBean.LOG.info("request from " + request.getRemoteAddr());
     }
 
     Hit hit = new Hit(Integer.parseInt(request.getParameter("idx")),
Index: src/java/org/apache/nutch/net/protocols/ProtocolException.java
===================================================================
--- src/java/org/apache/nutch/net/protocols/ProtocolException.java	(revision 562509)
+++ src/java/org/apache/nutch/net/protocols/ProtocolException.java	(working copy)
@@ -23,6 +23,7 @@
  * Base exception for all protocol handlers
  * @deprecated Use {@link org.apache.nutch.protocol.ProtocolException} instead.
  */
+@SuppressWarnings("serial")
 public class ProtocolException extends Exception implements Serializable {
 
 
Index: src/java/org/apache/nutch/net/protocols/HttpDateFormat.java
===================================================================
--- src/java/org/apache/nutch/net/protocols/HttpDateFormat.java	(revision 562509)
+++ src/java/org/apache/nutch/net/protocols/HttpDateFormat.java	(working copy)
@@ -111,16 +111,14 @@
   }
 
   public static void main(String[] args) throws Exception {
-    HttpDateFormat format = new HttpDateFormat();
-
     Date now = new Date(System.currentTimeMillis());
 
-    String string = format.toString(now);
+    String string = HttpDateFormat.toString(now);
 
-    long time = format.toLong(string);
+    long time = HttpDateFormat.toLong(string);
 
     System.out.println(string);
-    System.out.println(format.toString(time));
+    System.out.println(HttpDateFormat.toString(time));
   }
 
 }
Index: src/java/org/apache/nutch/net/URLFilterException.java
===================================================================
--- src/java/org/apache/nutch/net/URLFilterException.java	(revision 562509)
+++ src/java/org/apache/nutch/net/URLFilterException.java	(working copy)
@@ -17,6 +17,7 @@
 
 package org.apache.nutch.net;
 
+@SuppressWarnings("serial")
 public class URLFilterException extends Exception {
 
   public URLFilterException() {
Index: src/java/org/apache/nutch/net/URLNormalizers.java
===================================================================
--- src/java/org/apache/nutch/net/URLNormalizers.java	(revision 562509)
+++ src/java/org/apache/nutch/net/URLNormalizers.java	(working copy)
@@ -99,7 +99,7 @@
   public static final Log LOG = LogFactory.getLog(URLNormalizers.class);
 
   /* Empty extension list for caching purposes. */
-  private final List EMPTY_EXTENSION_LIST = Collections.EMPTY_LIST;
+  private final List<Extension> EMPTY_EXTENSION_LIST = Collections.emptyList();
   
   private final URLNormalizer[] EMPTY_NORMALIZERS = new URLNormalizer[0];
 
@@ -147,17 +147,17 @@
    * @throws PluginRuntimeException
    */
   URLNormalizer[] getURLNormalizers(String scope) {
-    List extensions = getExtensions(scope);
+    List<Extension> extensions = getExtensions(scope);
     
     if (extensions == EMPTY_EXTENSION_LIST) {
       return EMPTY_NORMALIZERS;
     }
     
-    List normalizers = new Vector(extensions.size());
+    List<URLNormalizer> normalizers = new Vector<URLNormalizer>(extensions.size());
 
-    Iterator it = extensions.iterator();
+    Iterator<Extension> it = extensions.iterator();
     while (it.hasNext()) {
-      Extension ext = (Extension) it.next();
+      Extension ext = it.next();
       URLNormalizer normalizer = null;
       try {
         // check to see if we've cached this URLNormalizer instance yet
@@ -177,7 +177,7 @@
                 + "function: attempting to continue instantiating plugins");
       }
     }
-    return (URLNormalizer[]) normalizers.toArray(new URLNormalizer[normalizers
+    return normalizers.toArray(new URLNormalizer[normalizers
             .size()]);
   }
 
@@ -190,9 +190,9 @@
    *         empty list.
    * @throws PluginRuntimeException
    */
-  private List getExtensions(String scope) {
+  private List<Extension> getExtensions(String scope) {
 
-    List extensions = (List) this.conf.getObject(URLNormalizer.X_POINT_ID + "_x_"
+    List<Extension> extensions = (List<Extension>) this.conf.getObject(URLNormalizer.X_POINT_ID + "_x_"
             + scope);
 
     // Just compare the reference:
@@ -224,7 +224,7 @@
    *         returns null.
    * @throws PluginRuntimeException
    */
-  private List findExtensions(String scope) {
+  private List<Extension> findExtensions(String scope) {
 
     String[] orders = null;
     String orderlist = conf.get("urlnormalizer.order." + scope);
@@ -233,26 +233,26 @@
       orders = orderlist.split("\\s+");
     }
     String scopelist = conf.get("urlnormalizer.scope." + scope);
-    Set impls = null;
+    Set<String> impls = null;
     if (scopelist != null && !scopelist.trim().equals("")) {
       String[] names = scopelist.split("\\s+");
-      impls = new HashSet(Arrays.asList(names));
+      impls = new HashSet<String>(Arrays.asList(names));
     }
     Extension[] extensions = this.extensionPoint.getExtensions();
-    HashMap normalizerExtensions = new HashMap();
+    HashMap<String, Extension> normalizerExtensions = new HashMap<String, Extension>();
     for (int i = 0; i < extensions.length; i++) {
       Extension extension = extensions[i];
       if (impls != null && !impls.contains(extension.getClazz()))
         continue;
       normalizerExtensions.put(extension.getClazz(), extension);
     }
-    List res = new ArrayList();
+    List<Extension> res = new ArrayList<Extension>();
     if (orders == null) {
       res.addAll(normalizerExtensions.values());
     } else {
       // first add those explicitly named in correct order
       for (int i = 0; i < orders.length; i++) {
-        Extension e = (Extension)normalizerExtensions.get(orders[i]);
+        Extension e = normalizerExtensions.get(orders[i]);
         if (e != null) {
           res.add(e);
           normalizerExtensions.remove(orders[i]);
Index: src/java/org/apache/nutch/net/URLFilters.java
===================================================================
--- src/java/org/apache/nutch/net/URLFilters.java	(revision 562509)
+++ src/java/org/apache/nutch/net/URLFilters.java	(working copy)
@@ -19,6 +19,7 @@
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.nutch.plugin.Extension;
 import org.apache.nutch.plugin.ExtensionPoint;
@@ -48,7 +49,7 @@
         if (point == null)
           throw new RuntimeException(URLFilter.X_POINT_ID + " not found.");
         Extension[] extensions = point.getExtensions();
-        HashMap filterMap = new HashMap();
+        Map<String, URLFilter> filterMap = new HashMap<String, URLFilter>();
         for (int i = 0; i < extensions.length; i++) {
           Extension extension = extensions[i];
           URLFilter filter = (URLFilter) extension.getExtensionInstance();
@@ -60,9 +61,9 @@
           conf.setObject(URLFilter.class.getName(), filterMap.values().toArray(
               new URLFilter[0]));
         } else {
-          ArrayList filters = new ArrayList();
+          ArrayList<URLFilter> filters = new ArrayList<URLFilter>();
           for (int i = 0; i < orderedFilters.length; i++) {
-            URLFilter filter = (URLFilter) filterMap.get(orderedFilters[i]);
+            URLFilter filter = filterMap.get(orderedFilters[i]);
             if (filter != null) {
               filters.add(filter);
             }
Index: src/java/org/apache/nutch/indexer/IndexingException.java
===================================================================
--- src/java/org/apache/nutch/indexer/IndexingException.java	(revision 562509)
+++ src/java/org/apache/nutch/indexer/IndexingException.java	(working copy)
@@ -17,6 +17,7 @@
 
 package org.apache.nutch.indexer;
 
+@SuppressWarnings("serial")
 public class IndexingException extends Exception {
 
   public IndexingException() {
Index: src/java/org/apache/nutch/indexer/IndexSorter.java
===================================================================
--- src/java/org/apache/nutch/indexer/IndexSorter.java	(revision 562509)
+++ src/java/org/apache/nutch/indexer/IndexSorter.java	(working copy)
@@ -40,12 +40,12 @@
 public class IndexSorter extends ToolBase {
   private static final Log LOG = LogFactory.getLog(IndexSorter.class);
   
-  private static class PostingMap implements Comparable {
+  private static class PostingMap implements Comparable<PostingMap> {
     private int newDoc;
     private long offset;
 
-    public int compareTo(Object o) {              // order by newDoc id
-      return this.newDoc - ((PostingMap)o).newDoc;
+    public int compareTo(PostingMap pm) {              // order by newDoc id
+      return this.newDoc - pm.newDoc;
     }
   }
 
@@ -225,12 +225,11 @@
 
   }
 
-  private static class DocScore implements Comparable {
+  private static class DocScore implements Comparable<DocScore> {
     private int oldDoc;
     private float score;
 
-    public int compareTo(Object o) {              // order by score, oldDoc
-      DocScore that = (DocScore)o;
+    public int compareTo(DocScore that) {            // order by score, oldDoc
       if (this.score == that.score) {
         return this.oldDoc - that.oldDoc;
       } else {
Index: src/java/org/apache/nutch/indexer/NutchSimilarity.java
===================================================================
--- src/java/org/apache/nutch/indexer/NutchSimilarity.java	(revision 562509)
+++ src/java/org/apache/nutch/indexer/NutchSimilarity.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.search.DefaultSimilarity;
 
 /** Similarity implementatation used by Nutch indexing and search. */
+@SuppressWarnings("serial")
 public class NutchSimilarity extends DefaultSimilarity  {
   private static final int MIN_CONTENT_LENGTH = 1000;
 
Index: src/java/org/apache/nutch/indexer/IndexingFilters.java
===================================================================
--- src/java/org/apache/nutch/indexer/IndexingFilters.java	(revision 562509)
+++ src/java/org/apache/nutch/indexer/IndexingFilters.java	(working copy)
@@ -62,14 +62,13 @@
         if (point == null)
           throw new RuntimeException(IndexingFilter.X_POINT_ID + " not found.");
         Extension[] extensions = point.getExtensions();
-        HashMap filterMap = new HashMap();
+        HashMap<String, IndexingFilter> filterMap = 
+          new HashMap<String, IndexingFilter>();
         for (int i = 0; i < extensions.length; i++) {
           Extension extension = extensions[i];
           IndexingFilter filter = (IndexingFilter) extension
               .getExtensionInstance();
-          if (LOG.isInfoEnabled()) {
-            LOG.info("Adding " + filter.getClass().getName());
-          }
+          LOG.info("Adding " + filter.getClass().getName());
           if (!filterMap.containsKey(filter.getClass().getName())) {
             filterMap.put(filter.getClass().getName(), filter);
           }
@@ -80,13 +79,13 @@
          */
         if (orderedFilters == null) {
           conf.setObject(IndexingFilter.class.getName(),
-              (IndexingFilter[]) filterMap.values().toArray(
+              filterMap.values().toArray(
                   new IndexingFilter[0]));
           /* Otherwise run the filters in the required order */
         } else {
           ArrayList<IndexingFilter> filters = new ArrayList<IndexingFilter>();
           for (int i = 0; i < orderedFilters.length; i++) {
-            IndexingFilter filter = (IndexingFilter) filterMap
+            IndexingFilter filter = filterMap
                 .get(orderedFilters[i]);
             if (filter != null) {
               filters.add(filter);
Index: src/java/org/apache/nutch/tools/PruneIndexTool.java
===================================================================
--- src/java/org/apache/nutch/tools/PruneIndexTool.java	(revision 562509)
+++ src/java/org/apache/nutch/tools/PruneIndexTool.java	(working copy)
@@ -269,7 +269,7 @@
       numIdx = 1;
     } else {
       Directory dir;
-      Vector indexes = new Vector(indexDirs.length);
+      Vector<IndexReader> indexes = new Vector<IndexReader>(indexDirs.length);
       for (int i = 0; i < indexDirs.length; i++) {
         try {
           dir = FSDirectory.getDirectory(indexDirs[i], false);
@@ -297,7 +297,7 @@
         }
       }
       if (indexes.size() == 0) throw new Exception("No input indexes.");
-      IndexReader[] readers = (IndexReader[])indexes.toArray(new IndexReader[0]);
+      IndexReader[] readers = indexes.toArray(new IndexReader[0]);
       reader = new MultiReader(readers);
     }
     if (LOG.isInfoEnabled()) {
@@ -414,7 +414,7 @@
       if (LOG.isFatalEnabled()) { LOG.fatal("Not a directory: " + idx); }
       return;
     }
-    Vector paths = new Vector();
+    Vector<File> paths = new Vector<File>();
     if (IndexReader.indexExists(idx)) {
       paths.add(idx);
     } else {
@@ -443,7 +443,7 @@
         return;
       }
     }
-    File[] indexes = (File[])paths.toArray(new File[0]);
+    File[] indexes = paths.toArray(new File[0]);
     boolean force = false;
     boolean dryrun = false;
     String qPath = null;
@@ -468,12 +468,12 @@
         return;
       }
     }
-    Vector cv = new Vector();
+    Vector<PruneChecker> cv = new Vector<PruneChecker>();
     if (fList != null) {
       StringTokenizer st = new StringTokenizer(fList, ",");
-      Vector tokens = new Vector();
+      Vector<String> tokens = new Vector<String>();
       while (st.hasMoreTokens()) tokens.add(st.nextToken());
-      String[] fields = (String[])tokens.toArray(new String[0]);
+      String[] fields = tokens.toArray(new String[0]);
       PruneChecker pc = new PrintFieldsChecker(System.out, fields);
       cv.add(pc);
     }
@@ -485,7 +485,7 @@
 
     PruneChecker[] checkers = null;
     if (cv.size() > 0) {
-      checkers = (PruneChecker[])cv.toArray(new PruneChecker[0]);
+      checkers = cv.toArray(new PruneChecker[0]);
     }
     Query[] queries = null;
     InputStream is = null;
@@ -535,7 +535,7 @@
     BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"));
     String line = null;
     QueryParser qp = new QueryParser("url", new WhitespaceAnalyzer());
-    Vector queries = new Vector();
+    Vector<Query> queries = new Vector<Query>();
     while ((line = br.readLine()) != null) {
       line = line.trim();
       //skip blanks and comments
@@ -543,7 +543,7 @@
       Query q = qp.parse(line);
       queries.add(q);
     }
-    return (Query[])queries.toArray(new Query[0]);
+    return queries.toArray(new Query[0]);
   }
   
   private static void usage() {
Index: src/java/org/apache/nutch/tools/DmozParser.java
===================================================================
--- src/java/org/apache/nutch/tools/DmozParser.java	(revision 562509)
+++ src/java/org/apache/nutch/tools/DmozParser.java	(working copy)
@@ -209,16 +209,14 @@
      * When parsing begins
      */
     public void startDocument() {
-      if (LOG.isInfoEnabled()) { LOG.info("Begin parse"); }
+      LOG.info("Begin parse");
     }
 
     /**
      * When parsing ends
      */
     public void endDocument() {
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Completed parse.  Found " + pages + " pages.");
-      }
+      LOG.info("Completed parse.  Found " + pages + " pages.");
     }
 
     /**
@@ -289,7 +287,7 @@
                        skew, topicPattern);
     reader.setContentHandler(rp);
     reader.setErrorHandler(rp);
-    if (LOG.isInfoEnabled()) { LOG.info("skew = " + rp.hashSkew); }
+    LOG.info("skew = " + rp.hashSkew);
 
     //
     // Open filtered text stream.  The TextFilter makes sure that
@@ -311,8 +309,9 @@
     }
   }
 
-  private static void addTopicsFromFile(String topicFile, Vector topics)
-    throws IOException {
+  private static void addTopicsFromFile(String topicFile, 
+                                        Vector<String> topics)
+  throws IOException {
     BufferedReader in = null;
     try {
       in = new BufferedReader(new InputStreamReader(new FileInputStream(topicFile), "UTF-8"));
@@ -352,7 +351,7 @@
     String dmozFile = argv[0];
     boolean includeAdult = false;
     Pattern topicPattern = null; 
-    Vector topics = new Vector(); 
+    Vector<String> topics = new Vector<String>(); 
     
     Configuration conf = NutchConfiguration.create();
     FileSystem fs = FileSystem.get(conf);
@@ -381,14 +380,12 @@
         String regExp = new String("^("); 
         int j = 0;
         for ( ; j < topics.size() - 1; ++j) {
-          regExp = regExp.concat((String) topics.get(j));
+          regExp = regExp.concat(topics.get(j));
           regExp = regExp.concat("|");
         }
-        regExp = regExp.concat((String) topics.get(j));
+        regExp = regExp.concat(topics.get(j));
         regExp = regExp.concat(").*"); 
-        if (LOG.isInfoEnabled()) {
-          LOG.info("Topic selection pattern = " + regExp);
-        }
+        LOG.info("Topic selection pattern = " + regExp);
         topicPattern = Pattern.compile(regExp); 
       }
 
Index: src/java/org/apache/nutch/protocol/ProtocolException.java
===================================================================
--- src/java/org/apache/nutch/protocol/ProtocolException.java	(revision 562509)
+++ src/java/org/apache/nutch/protocol/ProtocolException.java	(working copy)
@@ -17,6 +17,7 @@
 
 package org.apache.nutch.protocol;
 
+@SuppressWarnings("serial")
 public class ProtocolException extends Exception {
 
   public ProtocolException() {
Index: src/java/org/apache/nutch/protocol/ProtocolStatus.java
===================================================================
--- src/java/org/apache/nutch/protocol/ProtocolStatus.java	(revision 562509)
+++ src/java/org/apache/nutch/protocol/ProtocolStatus.java	(working copy)
@@ -87,7 +87,8 @@
   private long lastModified;
   private String[] args;
   
-  private static HashMap codeToName = new HashMap();
+  private static final HashMap<Integer, String> codeToName = 
+    new HashMap<Integer, String>();
   static {
     codeToName.put(new Integer(SUCCESS), "success");
     codeToName.put(new Integer(FAILED), "failed");
Index: src/java/org/apache/nutch/protocol/ProtocolNotFound.java
===================================================================
--- src/java/org/apache/nutch/protocol/ProtocolNotFound.java	(revision 562509)
+++ src/java/org/apache/nutch/protocol/ProtocolNotFound.java	(working copy)
@@ -17,6 +17,7 @@
 
 package org.apache.nutch.protocol;
 
+@SuppressWarnings("serial")
 public class ProtocolNotFound extends ProtocolException {
   private String url;
 
Index: src/java/org/apache/nutch/segment/SegmentMerger.java
===================================================================
--- src/java/org/apache/nutch/segment/SegmentMerger.java	(revision 562509)
+++ src/java/org/apache/nutch/segment/SegmentMerger.java	(working copy)
@@ -266,7 +266,7 @@
         }
 
         public void close(Reporter reporter) throws IOException {
-          Iterator it = sliceWriters.values().iterator();
+          Iterator<Object> it = sliceWriters.values().iterator();
           while (it.hasNext()) {
             Object o = it.next();
             if (o instanceof SequenceFile.Writer) {
@@ -350,7 +350,8 @@
     String lastCname = null;
     String lastPDname = null;
     String lastPTname = null;
-    TreeMap linked = new TreeMap();
+    TreeMap<String, ArrayList<CrawlDatum>> linked = 
+      new TreeMap<String, ArrayList<CrawlDatum>>();
     while (values.hasNext()) {
       MetaWrapper wrapper = (MetaWrapper)values.next();
       Object o = wrapper.get();
@@ -399,9 +400,9 @@
             continue;
           }
           // collect all LINKED values from the latest segment
-          ArrayList segLinked = (ArrayList)linked.get(sp.segmentName);
+          ArrayList<CrawlDatum> segLinked = linked.get(sp.segmentName);
           if (segLinked == null) {
-            segLinked = new ArrayList();
+            segLinked = new ArrayList<CrawlDatum>();
             linked.put(sp.segmentName, segLinked);
           }
           segLinked.add(val);
@@ -492,13 +493,13 @@
       output.collect(key, wrapper);
     }
     if (linked.size() > 0) {
-      String name = (String)linked.lastKey();
+      String name = linked.lastKey();
       sp.partName = CrawlDatum.PARSE_DIR_NAME;
       sp.segmentName = name;
       wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
-      ArrayList segLinked = (ArrayList)linked.get(name);
+      ArrayList<CrawlDatum> segLinked = linked.get(name);
       for (int i = 0; i < segLinked.size(); i++) {
-        CrawlDatum link = (CrawlDatum)segLinked.get(i);
+        CrawlDatum link = segLinked.get(i);
         wrapper.set(link);
         output.collect(key, wrapper);
       }
@@ -613,7 +614,7 @@
     Configuration conf = NutchConfiguration.create();
     final FileSystem fs = FileSystem.get(conf);
     Path out = new Path(args[0]);
-    ArrayList segs = new ArrayList();
+    ArrayList<Path> segs = new ArrayList<Path>();
     long sliceSize = 0;
     boolean filter = false;
     for (int i = 1; i < args.length; i++) {
@@ -642,7 +643,7 @@
       return;
     }
     SegmentMerger merger = new SegmentMerger(conf);
-    merger.merge(out, (Path[]) segs.toArray(new Path[segs.size()]), filter, sliceSize);
+    merger.merge(out, segs.toArray(new Path[segs.size()]), filter, sliceSize);
   }
 
 }
Index: src/java/org/apache/nutch/segment/SegmentReader.java
===================================================================
--- src/java/org/apache/nutch/segment/SegmentReader.java	(revision 562509)
+++ src/java/org/apache/nutch/segment/SegmentReader.java	(working copy)
@@ -276,13 +276,13 @@
   };
 
   public void get(final Path segment, final Text key, Writer writer,
-          final Map results) throws Exception {
-    if (LOG.isInfoEnabled()) { LOG.info("SegmentReader: get '" + key + "'"); }
-    ArrayList threads = new ArrayList();
+          final Map<String, List<Writable>> results) throws Exception {
+    if (LOG.isInfoEnabled()) { LOG.info("SegmentReader: get '" + key + "'"); }
+    ArrayList<Thread> threads = new ArrayList<Thread>();
     if (co) threads.add(new Thread() {
       public void run() {
         try {
-          List res = getMapRecords(new Path(segment, Content.DIR_NAME), key);
+          List<Writable> res = getMapRecords(new Path(segment, Content.DIR_NAME), key);
           results.put("co", res);
         } catch (Exception e) {
           e.printStackTrace(LogUtil.getWarnStream(LOG));
@@ -292,7 +292,7 @@
     if (fe) threads.add(new Thread() {
       public void run() {
         try {
-          List res = getMapRecords(new Path(segment, CrawlDatum.FETCH_DIR_NAME), key);
+          List<Writable> res = getMapRecords(new Path(segment, CrawlDatum.FETCH_DIR_NAME), key);
           results.put("fe", res);
         } catch (Exception e) {
           e.printStackTrace(LogUtil.getWarnStream(LOG));
@@ -302,7 +302,7 @@
     if (ge) threads.add(new Thread() {
       public void run() {
         try {
-          List res = getSeqRecords(new Path(segment, CrawlDatum.GENERATE_DIR_NAME), key);
+          List<Writable> res = getSeqRecords(new Path(segment, CrawlDatum.GENERATE_DIR_NAME), key);
           results.put("ge", res);
         } catch (Exception e) {
           e.printStackTrace(LogUtil.getWarnStream(LOG));
@@ -312,7 +312,7 @@
     if (pa) threads.add(new Thread() {
       public void run() {
         try {
-          List res = getSeqRecords(new Path(segment, CrawlDatum.PARSE_DIR_NAME), key);
+          List<Writable> res = getSeqRecords(new Path(segment, CrawlDatum.PARSE_DIR_NAME), key);
           results.put("pa", res);
         } catch (Exception e) {
           e.printStackTrace(LogUtil.getWarnStream(LOG));
@@ -322,7 +322,7 @@
     if (pd) threads.add(new Thread() {
       public void run() {
         try {
-          List res = getMapRecords(new Path(segment, ParseData.DIR_NAME), key);
+          List<Writable> res = getMapRecords(new Path(segment, ParseData.DIR_NAME), key);
           results.put("pd", res);
         } catch (Exception e) {
           e.printStackTrace(LogUtil.getWarnStream(LOG));
@@ -332,15 +332,15 @@
     if (pt) threads.add(new Thread() {
       public void run() {
         try {
-          List res = getMapRecords(new Path(segment, ParseText.DIR_NAME), key);
+          List<Writable> res = getMapRecords(new Path(segment, ParseText.DIR_NAME), key);
           results.put("pt", res);
         } catch (Exception e) {
           e.printStackTrace(LogUtil.getWarnStream(LOG));
         }
       }
     });
-    Iterator it = threads.iterator();
-    while (it.hasNext()) ((Thread)it.next()).start();
+    Iterator<Thread> it = threads.iterator();
+    while (it.hasNext()) it.next().start();
     int cnt;
     do {
       cnt = 0;
@@ -349,14 +349,14 @@
       } catch (Exception e) {};
       it = threads.iterator();
       while (it.hasNext()) {
-        if (((Thread)it.next()).isAlive()) cnt++;
+        if (it.next().isAlive()) cnt++;
       }
       if ((cnt > 0) && (LOG.isDebugEnabled())) {
         LOG.debug("(" + cnt + " to retrieve)");
       }
     } while (cnt > 0);
     for (int i = 0; i < keys.length; i++) {
-      List res = (List)results.get(keys[i][0]);
+      List<Writable> res = results.get(keys[i][0]);
       if (res != null && res.size() > 0) {
         for (int k = 0; k < res.size(); k++) {
           writer.write(keys[i][1]);
@@ -367,9 +367,9 @@
     }
   }
   
-  private List getMapRecords(Path dir, Text key) throws Exception {
+  private List<Writable> getMapRecords(Path dir, Text key) throws Exception {
     MapFile.Reader[] readers = MapFileOutputFormat.getReaders(fs, dir, getConf());
-    ArrayList res = new ArrayList();
+    ArrayList<Writable> res = new ArrayList<Writable>();
     Class keyClass = readers[0].getKeyClass();
     Class valueClass = readers[0].getValueClass();
     if (!keyClass.getName().equals("org.apache.hadoop.io.Text"))
@@ -384,9 +384,9 @@
     return res;
   }
 
-  private List getSeqRecords(Path dir, Text key) throws Exception {
+  private List<Writable> getSeqRecords(Path dir, Text key) throws Exception {
     SequenceFile.Reader[] readers = SequenceFileOutputFormat.getReaders(getConf(), dir);
-    ArrayList res = new ArrayList();
+    ArrayList<Writable> res = new ArrayList<Writable>();
     Class keyClass = readers[0].getKeyClass();
     Class valueClass = readers[0].getValueClass();
     if (!keyClass.getName().equals("org.apache.hadoop.io.Text"))
@@ -415,10 +415,10 @@
   
   SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
   
-  public void list(List dirs, Writer writer) throws Exception {
+  public void list(List<Path> dirs, Writer writer) throws Exception {
     writer.write("NAME\t\tGENERATED\tFETCHER START\t\tFETCHER END\t\tFETCHED\tPARSED\n");
     for (int i = 0; i < dirs.size(); i++) {
-      Path dir = (Path)dirs.get(i);
+      Path dir = dirs.get(i);
       SegmentReaderStats stats = new SegmentReaderStats();
       getStats(dir, stats);
       writer.write(dir.getName() + "\t");
@@ -554,7 +554,7 @@
         segmentReader.dump(new Path(input), new Path(output));
         return;
       case MODE_LIST:
-        ArrayList dirs = new ArrayList();
+        ArrayList<Path> dirs = new ArrayList<Path>();
         for (int i = 1; i < args.length; i++) {
           if (args[i] == null) continue;
           if (args[i].equals("-dir")) {
@@ -587,7 +587,7 @@
           usage();
           return;
         }
-        segmentReader.get(new Path(input), new Text(key), new OutputStreamWriter(System.out, "UTF-8"), new HashMap());
+        segmentReader.get(new Path(input), new Text(key), new OutputStreamWriter(System.out, "UTF-8"), new HashMap<String, List<Writable>>());
         return;
       default:
         System.err.println("Invalid operation: " + args[0]);
Index: src/java/org/apache/nutch/scoring/ScoringFilterException.java
===================================================================
--- src/java/org/apache/nutch/scoring/ScoringFilterException.java	(revision 562509)
+++ src/java/org/apache/nutch/scoring/ScoringFilterException.java	(working copy)
@@ -21,6 +21,7 @@
  * 
  * @author Andrzej Bialecki
  */
+@SuppressWarnings("serial")
 public class ScoringFilterException extends Exception {
 
   public ScoringFilterException() {
Index: src/java/org/apache/nutch/scoring/ScoringFilter.java
===================================================================
--- src/java/org/apache/nutch/scoring/ScoringFilter.java	(revision 562509)
+++ src/java/org/apache/nutch/scoring/ScoringFilter.java	(working copy)
@@ -136,7 +136,7 @@
    * links pointing to this page, found in the current update batch.
    * @throws ScoringFilterException
    */
-  public void updateDbScore(Text url, CrawlDatum old, CrawlDatum datum, List inlinked) throws ScoringFilterException;
+  public void updateDbScore(Text url, CrawlDatum old, CrawlDatum datum, List<CrawlDatum> inlinked) throws ScoringFilterException;
   
   /**
    * This method calculates a Lucene document boost.
Index: src/java/org/apache/nutch/scoring/ScoringFilters.java
===================================================================
--- src/java/org/apache/nutch/scoring/ScoringFilters.java	(revision 562509)
+++ src/java/org/apache/nutch/scoring/ScoringFilters.java	(working copy)
@@ -61,7 +61,8 @@
         ExtensionPoint point = PluginRepository.get(conf).getExtensionPoint(ScoringFilter.X_POINT_ID);
         if (point == null) throw new RuntimeException(ScoringFilter.X_POINT_ID + " not found.");
         Extension[] extensions = point.getExtensions();
-        HashMap filterMap = new HashMap();
+        HashMap<String, ScoringFilter> filterMap = 
+          new HashMap<String, ScoringFilter>();
         for (int i = 0; i < extensions.length; i++) {
           Extension extension = extensions[i];
           ScoringFilter filter = (ScoringFilter) extension.getExtensionInstance();
@@ -74,7 +75,7 @@
         } else {
           ScoringFilter[] filter = new ScoringFilter[orderedFilters.length];
           for (int i = 0; i < orderedFilters.length; i++) {
-            filter[i] = (ScoringFilter) filterMap.get(orderedFilters[i]);
+            filter[i] = filterMap.get(orderedFilters[i]);
           }
           conf.setObject(ScoringFilter.class.getName(), filter);
         }
@@ -110,7 +111,7 @@
   }
 
   /** Calculate updated page score during CrawlDb.update(). */
-  public void updateDbScore(Text url, CrawlDatum old, CrawlDatum datum, List inlinked) throws ScoringFilterException {
+  public void updateDbScore(Text url, CrawlDatum old, CrawlDatum datum, List<CrawlDatum> inlinked) throws ScoringFilterException {
     for (int i = 0; i < this.filters.length; i++) {
       this.filters[i].updateDbScore(url, old, datum, inlinked);
     }
Index: src/java/org/apache/nutch/html/Entities.java
===================================================================
--- src/java/org/apache/nutch/html/Entities.java	(revision 562509)
+++ src/java/org/apache/nutch/html/Entities.java	(working copy)
@@ -20,7 +20,8 @@
 import java.util.*;
 
 public class Entities {
-  static final Hashtable decoder = new Hashtable(300);
+  static final Hashtable<String, String> decoder = 
+    new Hashtable<String, String>(300);
   static final String[]  encoder = new String[0x100];
 
   static final String decode(String entity) {
@@ -37,7 +38,7 @@
 	new Character((char)Integer.parseInt(entity.substring(start), radix));
       return c.toString();
     } else {
-      String s = (String)decoder.get(entity);
+      String s = decoder.get(entity);
       if (s != null)
 	return s;
       else return "";
Index: src/java/org/apache/nutch/crawl/CrawlDbReader.java
===================================================================
--- src/java/org/apache/nutch/crawl/CrawlDbReader.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/CrawlDbReader.java	(working copy)
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Random;
 import java.util.TreeMap;
 
@@ -264,12 +265,12 @@
     Text key = new Text();
     LongWritable value = new LongWritable();
 
-    TreeMap stats = new TreeMap();
+    TreeMap<String, LongWritable> stats = new TreeMap<String, LongWritable>();
     for (int i = 0; i < readers.length; i++) {
       SequenceFile.Reader reader = readers[i];
       while (reader.next(key, value)) {
         String k = key.toString();
-        LongWritable val = (LongWritable) stats.get(k);
+        LongWritable val = stats.get(k);
         if (val == null) {
           val = new LongWritable();
           if (k.equals("scx")) val.set(Long.MIN_VALUE);
@@ -289,13 +290,12 @@
     
     if (LOG.isInfoEnabled()) {
       LOG.info("Statistics for CrawlDb: " + crawlDb);
-      LongWritable totalCnt = (LongWritable)stats.get("T");
+      LongWritable totalCnt = stats.get("T");
       stats.remove("T");
       LOG.info("TOTAL urls:\t" + totalCnt.get());
-      Iterator it = stats.keySet().iterator();
-      while (it.hasNext()) {
-        String k = (String) it.next();
-        LongWritable val = (LongWritable) stats.get(k);
+      for (Map.Entry<String, LongWritable> entry : stats.entrySet()) {
+        String k = entry.getKey();
+        LongWritable val = entry.getValue();
         if (k.equals("scn")) {
           LOG.info("min score:\t" + (float) (val.get() / 1000.0f));
         } else if (k.equals("scx")) {
Index: src/java/org/apache/nutch/crawl/LinkDb.java
===================================================================
--- src/java/org/apache/nutch/crawl/LinkDb.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/LinkDb.java	(working copy)
@@ -273,7 +273,7 @@
     Path segDir = null;
     final FileSystem fs = FileSystem.get(conf);
     Path db = new Path(args[0]);
-    ArrayList segs = new ArrayList();
+    ArrayList<Path> segs = new ArrayList<Path>();
     boolean filter = true;
     boolean normalize = true;
     boolean force = false;
@@ -299,7 +299,7 @@
       } else segs.add(new Path(args[i]));
     }
     try {
-      invert(db, (Path[])segs.toArray(new Path[segs.size()]), normalize, filter, force);
+      invert(db, segs.toArray(new Path[segs.size()]), normalize, filter, force);
       return 0;
     } catch (Exception e) {
       LOG.fatal("LinkDb: " + StringUtils.stringifyException(e));
Index: src/java/org/apache/nutch/crawl/LinkDbMerger.java
===================================================================
--- src/java/org/apache/nutch/crawl/LinkDbMerger.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/LinkDbMerger.java	(working copy)
@@ -79,10 +79,10 @@
       Inlinks inlinks = (Inlinks)values.next();
 
       int end = Math.min(maxInlinks - result.size(), inlinks.size());
-      Iterator it = inlinks.iterator();
+      Iterator<Inlink> it = inlinks.iterator();
       int i = 0;
       while(it.hasNext() && i++ < end) {
-        result.add((Inlink)it.next());
+        result.add(it.next());
       }
     }
     if (result.size() == 0) return;
@@ -149,7 +149,7 @@
       return -1;
     }
     Path output = new Path(args[0]);
-    ArrayList dbs = new ArrayList();
+    ArrayList<Path> dbs = new ArrayList<Path>();
     boolean normalize = false;
     boolean filter = false;
     for (int i = 1; i < args.length; i++) {
@@ -160,7 +160,7 @@
       } else dbs.add(new Path(args[i]));
     }
     try {
-      merge(output, (Path[])dbs.toArray(new Path[dbs.size()]), normalize, filter);
+      merge(output, dbs.toArray(new Path[dbs.size()]), normalize, filter);
       return 0;
     } catch (Exception e) {
       LOG.fatal("LinkDbMerger: " + StringUtils.stringifyException(e));
Index: src/java/org/apache/nutch/crawl/Inlinks.java
===================================================================
--- src/java/org/apache/nutch/crawl/Inlinks.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/Inlinks.java	(working copy)
@@ -31,7 +31,7 @@
 
   public void add(Inlinks inlinks) { this.inlinks.addAll(inlinks.inlinks); }
 
-  public Iterator iterator() {
+  public Iterator<Inlink> iterator() {
     return this.inlinks.iterator();
   }
   
@@ -49,16 +49,16 @@
 
   public void write(DataOutput out) throws IOException {
     out.writeInt(inlinks.size());
-    Iterator it = inlinks.iterator();
+    Iterator<Inlink> it = inlinks.iterator();
     while (it.hasNext()) {
-      ((Writable)it.next()).write(out);
+      it.next().write(out);
     }
   }
 
   public String toString() {
     StringBuffer buffer = new StringBuffer();
     buffer.append("Inlinks:\n");
-    Iterator it = inlinks.iterator();
+    Iterator<Inlink> it = inlinks.iterator();
     while (it.hasNext()) {
       buffer.append(" ");
       buffer.append(it.next());
@@ -70,11 +70,12 @@
   /** Return the set of anchor texts.  Only a single anchor with a given text
    * is permitted from a given domain. */
   public String[] getAnchors() throws IOException {
-    HashMap domainToAnchors = new HashMap();
-    ArrayList results = new ArrayList();
-    Iterator it = inlinks.iterator();
+    HashMap<String, Set<String>> domainToAnchors = 
+      new HashMap<String, Set<String>>();
+    ArrayList<String> results = new ArrayList<String>();
+    Iterator<Inlink> it = inlinks.iterator();
     while (it.hasNext()) {
-      Inlink inlink = (Inlink)it.next();
+      Inlink inlink = it.next();
       String anchor = inlink.getAnchor();
 
       if (anchor.length() == 0)                   // skip empty anchors
@@ -83,9 +84,9 @@
       try {
         domain = new URL(inlink.getFromUrl()).getHost();
       } catch (MalformedURLException e) {}
-      Set domainAnchors = (Set)domainToAnchors.get(domain);
+      Set<String> domainAnchors = domainToAnchors.get(domain);
       if (domainAnchors == null) {
-        domainAnchors = new HashSet();
+        domainAnchors = new HashSet<String>();
         domainToAnchors.put(domain, domainAnchors);
       }
       if (domainAnchors.add(anchor)) {            // new anchor from domain
@@ -93,7 +94,7 @@
       }
     }
 
-    return (String[])results.toArray(new String[results.size()]);
+    return results.toArray(new String[results.size()]);
   }
 
 
Index: src/java/org/apache/nutch/crawl/MapWritable.java
===================================================================
--- src/java/org/apache/nutch/crawl/MapWritable.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/MapWritable.java	(working copy)
@@ -25,6 +25,7 @@
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.Map;
 import java.util.Set;
 
 // Commons Logging imports
@@ -75,9 +76,9 @@
 
   private ClassIdEntry fIdFirst;
 
-  private static HashMap CLASS_ID_MAP = new HashMap();
+  private static Map<Class<?>, Byte> CLASS_ID_MAP = new HashMap<Class<?>, Byte>();
 
-  private static HashMap ID_CLASS_MAP = new HashMap();
+  private static Map<Byte, Class<?>> ID_CLASS_MAP = new HashMap<Byte, Class<?>>();
 
   static {
 
@@ -173,8 +174,8 @@
     return fFirst == null;
   }
 
-  public Set keySet() {
-    HashSet set = new HashSet();
+  public Set<Writable> keySet() {
+    HashSet<Writable> set = new HashSet<Writable>();
     if (isEmpty()) return set;
     set.add(fFirst.fKey);
     KeyValueEntry entry = fFirst;
@@ -206,9 +207,9 @@
     if (map == null || map.size() == 0) {
       return;
     }
-    Iterator iterator = map.keySet().iterator();
+    Iterator<Writable> iterator = map.keySet().iterator();
     while (iterator.hasNext()) {
-      Writable key = (Writable) iterator.next();
+      Writable key = iterator.next();
       Writable value = map.get(key);
       put(key, value);
     }
@@ -242,8 +243,8 @@
     return fSize;
   }
 
-  public Collection values() {
-    LinkedList list = new LinkedList();
+  public Collection<Writable> values() {
+    LinkedList<Writable> list = new LinkedList<Writable>();
     KeyValueEntry entry = fFirst;
     while (entry != null) {
       list.add(entry.fValue);
@@ -256,13 +257,13 @@
     if (obj instanceof MapWritable) {
       MapWritable map = (MapWritable) obj;
       if (fSize != map.fSize) return false;
-      HashSet set1 = new HashSet();
+      HashSet<KeyValueEntry> set1 = new HashSet<KeyValueEntry>();
       KeyValueEntry e1 = fFirst;
       while (e1 != null) {
         set1.add(e1);
         e1 = e1.fNextEntry;
       }
-      HashSet set2 = new HashSet();
+      HashSet<KeyValueEntry> set2 = new HashSet<KeyValueEntry>();
       KeyValueEntry e2 = map.fFirst;
       while (e2 != null) {
         set2.add(e2);
@@ -401,7 +402,7 @@
   }
 
   private byte getClassId(Class clazz) {
-    Byte classId = (Byte) CLASS_ID_MAP.get(clazz);
+    Byte classId = CLASS_ID_MAP.get(clazz);
     if (classId != null) {
       return classId.byteValue();
     }
@@ -448,7 +449,7 @@
   }
 
   private Class getClass(final byte id) throws IOException {
-    Class clazz = (Class) ID_CLASS_MAP.get(new Byte(id));
+    Class clazz = ID_CLASS_MAP.get(Byte.valueOf(id));
     if (clazz == null) {
       ClassIdEntry entry = fIdFirst;
       while (entry != null) {
Index: src/java/org/apache/nutch/crawl/LinkDbFilter.java
===================================================================
--- src/java/org/apache/nutch/crawl/LinkDbFilter.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/LinkDbFilter.java	(working copy)
@@ -52,8 +52,6 @@
   private URLFilters filters;
 
   private URLNormalizers normalizers;
-
-  private JobConf jobConf;
   
   private String scope;
   
@@ -62,7 +60,6 @@
   private Text newKey = new Text();
   
   public void configure(JobConf job) {
-    this.jobConf = job;
     filter = job.getBoolean(URL_FILTERING, false);
     normalize = job.getBoolean(URL_NORMALIZING, false);
     if (filter) {
@@ -97,10 +94,10 @@
     }
     if (url == null) return; // didn't pass the filters
     Inlinks inlinks = (Inlinks)value;
-    Iterator it = inlinks.iterator();
+    Iterator<Inlink> it = inlinks.iterator();
     String fromUrl = null;
     while (it.hasNext()) {
-      Inlink inlink = (Inlink)it.next();
+      Inlink inlink = it.next();
       fromUrl = inlink.getFromUrl();
       if (normalize) {
         try {
Index: src/java/org/apache/nutch/crawl/CrawlDbMerger.java
===================================================================
--- src/java/org/apache/nutch/crawl/CrawlDbMerger.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/CrawlDbMerger.java	(working copy)
@@ -150,7 +150,7 @@
       return -1;
     }
     Path output = new Path(args[0]);
-    ArrayList dbs = new ArrayList();
+    ArrayList<Path> dbs = new ArrayList<Path>();
     boolean filter = false;
     boolean normalize = false;
     for (int i = 1; i < args.length; i++) {
@@ -164,7 +164,7 @@
       dbs.add(new Path(args[i]));
     }
     try {
-      merge(output, (Path[]) dbs.toArray(new Path[dbs.size()]), normalize, filter);
+      merge(output, dbs.toArray(new Path[dbs.size()]), normalize, filter);
       return 0;
     } catch (Exception e) {
       LOG.fatal("CrawlDb merge: " + StringUtils.stringifyException(e));
Index: src/java/org/apache/nutch/crawl/CrawlDbFilter.java
===================================================================
--- src/java/org/apache/nutch/crawl/CrawlDbFilter.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/CrawlDbFilter.java	(working copy)
@@ -51,15 +51,12 @@
   private URLFilters filters;
 
   private URLNormalizers normalizers;
-
-  private JobConf jobConf;
   
   private String scope;
 
   public static final Log LOG = LogFactory.getLog(CrawlDbFilter.class);
 
   public void configure(JobConf job) {
-    this.jobConf = job;
     urlFiltering = job.getBoolean(URL_FILTERING, false);
     urlNormalizers = job.getBoolean(URL_NORMALIZING, false);
     if (urlFiltering) {
Index: src/java/org/apache/nutch/crawl/Generator.java
===================================================================
--- src/java/org/apache/nutch/crawl/Generator.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/Generator.java	(working copy)
@@ -86,7 +86,8 @@
     private long curTime;
     private long limit;
     private long count;
-    private HashMap hostCounts = new HashMap();
+    private HashMap<String, IntWritable> hostCounts = 
+      new HashMap<String, IntWritable>();
     private int maxPerHost;
     private Partitioner hostPartitioner = new PartitionUrlByHost();
     private URLFilters filters;
@@ -98,7 +99,6 @@
     private long dnsFailure = 0L;
     private boolean filter;
     private long genDelay;
-    private boolean runUpdatedb;
     private FetchSchedule schedule;
 
     public void configure(JobConf job) {
@@ -114,7 +114,6 @@
       genDelay = job.getLong(CRAWL_GEN_DELAY, 7L) * 3600L * 24L * 1000L;
       long time = job.getLong(Nutch.GENERATE_TIME_KEY, 0L);
       if (time > 0) genTime.set(time);
-      runUpdatedb = job.getBoolean(GENERATE_UPDATE_CRAWLDB, false);
       schedule = FetchScheduleFactory.getFetchSchedule(job);
     }
 
@@ -217,7 +216,7 @@
                 StringUtils.stringifyException(e) + ")");
             continue;
           }
-          IntWritable hostCount = (IntWritable)hostCounts.get(host);
+          IntWritable hostCount = hostCounts.get(host);
           if (hostCount == null) {
             hostCount = new IntWritable();
             hostCounts.put(host, hostCount);
Index: src/java/org/apache/nutch/crawl/LinkDbReader.java
===================================================================
--- src/java/org/apache/nutch/crawl/LinkDbReader.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/LinkDbReader.java	(working copy)
@@ -101,8 +101,6 @@
 
     job.addInputPath(new Path(linkdb, LinkDb.CURRENT_NAME));
     job.setInputFormat(SequenceFileInputFormat.class);
-    job.setInputKeyClass(Text.class);
-    job.setInputValueClass(Inlinks.class);
 
     job.setOutputPath(outFolder);
     job.setOutputFormat(TextOutputFormat.class);
@@ -134,7 +132,7 @@
         if (links == null) {
           System.out.println(" - no link information.");
         } else {
-          Iterator it = links.iterator();
+          Iterator<Inlink> it = links.iterator();
           while (it.hasNext()) {
             System.out.println(it.next().toString());
           }
Index: src/java/org/apache/nutch/crawl/TextProfileSignature.java
===================================================================
--- src/java/org/apache/nutch/crawl/TextProfileSignature.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/TextProfileSignature.java	(working copy)
@@ -66,7 +66,7 @@
   public byte[] calculate(Content content, Parse parse) {
     int MIN_TOKEN_LEN = getConf().getInt("db.signature.text_profile.min_token_len", 2);
     float QUANT_RATE = getConf().getFloat("db.signature.text_profile.quant_rate", 0.01f);
-    HashMap tokens = new HashMap();
+    HashMap<String, Token> tokens = new HashMap<String, Token>();
     String text = null;
     if (parse != null) text = parse.getText();
     if (text == null || text.length() == 0) return fallback.calculate(content, parse);
@@ -81,7 +81,7 @@
           if (curToken.length() > MIN_TOKEN_LEN) {
             // add it
             String s = curToken.toString();
-            Token tok = (Token)tokens.get(s);
+            Token tok = tokens.get(s);
             if (tok == null) {
               tok = new Token(0, s);
               tokens.put(s, tok);
@@ -97,7 +97,7 @@
     if (curToken.length() > MIN_TOKEN_LEN) {
       // add it
       String s = curToken.toString();
-      Token tok = (Token)tokens.get(s);
+      Token tok = tokens.get(s);
       if (tok == null) {
         tok = new Token(0, s);
         tokens.put(s, tok);
@@ -105,8 +105,8 @@
       tok.cnt++;
       if (tok.cnt > maxFreq) maxFreq = tok.cnt;
     }
-    Iterator it = tokens.values().iterator();
-    ArrayList profile = new ArrayList();
+    Iterator<Token> it = tokens.values().iterator();
+    ArrayList<Token> profile = new ArrayList<Token>();
     // calculate the QUANT value
     int QUANT = Math.round(maxFreq * QUANT_RATE);
     if (QUANT < 2) {
@@ -114,7 +114,7 @@
       else QUANT = 1;
     }
     while(it.hasNext()) {
-      Token t = (Token)it.next();
+      Token t = it.next();
       // round down to the nearest QUANT
       t.cnt = (t.cnt / QUANT) * QUANT;
       // discard the frequencies below the QUANT
@@ -127,7 +127,7 @@
     StringBuffer newText = new StringBuffer();
     it = profile.iterator();
     while (it.hasNext()) {
-      Token t = (Token)it.next();
+      Token t = it.next();
       if (newText.length() > 0) newText.append("\n");
       newText.append(t.toString());
     }
@@ -148,10 +148,8 @@
     }
   }
   
-  private static class TokenComparator implements Comparator {
-    public int compare(Object o1, Object o2) {
-      Token t1 = (Token)o1;
-      Token t2 = (Token)o2;
+  private static class TokenComparator implements Comparator<Token> {
+    public int compare(Token t1, Token t2) {
       return t2.cnt - t1.cnt;
     }
   }
@@ -159,7 +157,7 @@
   public static void main(String[] args) throws Exception {
     TextProfileSignature sig = new TextProfileSignature();
     sig.setConf(NutchConfiguration.create());
-    HashMap res = new HashMap();
+    HashMap<String, byte[]> res = new HashMap<String, byte[]>();
     File[] files = new File(args[0]).listFiles();
     for (int i = 0; i < files.length; i++) {
       FileInputStream fis = new FileInputStream(files[i]);
@@ -174,10 +172,10 @@
       byte[] signature = sig.calculate(null, new ParseImpl(text.toString(), null));
       res.put(files[i].toString(), signature);
     }
-    Iterator it = res.keySet().iterator();
+    Iterator<String> it = res.keySet().iterator();
     while (it.hasNext()) {
-      String name = (String)it.next();
-      byte[] signature = (byte[])res.get(name);
+      String name = it.next();
+      byte[] signature = res.get(name);
       System.out.println(name + "\t" + StringUtil.toHexString(signature));
     }
   }
Index: src/java/org/apache/nutch/crawl/CrawlDbReducer.java
===================================================================
--- src/java/org/apache/nutch/crawl/CrawlDbReducer.java	(revision 562509)
+++ src/java/org/apache/nutch/crawl/CrawlDbReducer.java	(working copy)
@@ -37,7 +37,7 @@
   
   private int retryMax;
   private CrawlDatum result = new CrawlDatum();
-  private ArrayList linked = new ArrayList();
+  private ArrayList<CrawlDatum> linked = new ArrayList<CrawlDatum>();
   private ScoringFilters scfilters = null;
   private boolean additionsAllowed;
   private float maxInterval;
@@ -101,7 +101,7 @@
     
     // if there is no fetched datum, perhaps there is a link
     if (fetch == null && linked.size() > 0) {
-      fetch = (CrawlDatum)linked.get(0);
+      fetch = linked.get(0);
     }
     
     // still no new data - record only unchanged old data, if exists, and return
Index: src/java/org/apache/nutch/parse/ParsePluginsReader.java
===================================================================
--- src/java/org/apache/nutch/parse/ParsePluginsReader.java	(revision 562509)
+++ src/java/org/apache/nutch/parse/ParsePluginsReader.java	(working copy)
@@ -21,7 +21,6 @@
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -122,7 +121,7 @@
     Element parsePlugins = document.getDocumentElement();
     
     // build up the alias hash map
-    Map aliases = getAliases(parsePlugins);
+    Map<String, String> aliases = getAliases(parsePlugins);
     // And store it on the parse plugin list
     pList.setAliases(aliases);
      
@@ -142,12 +141,12 @@
       // a separate list, and then insert them into the final list at the
       // order specified
       if (pluginList != null && pluginList.getLength() > 0) {
-        List plugList = new ArrayList(pluginList.getLength());
+        List<String> plugList = new ArrayList<String>(pluginList.getLength());
         
         for (int j = 0; j<pluginList.getLength(); j++) {
           Element plugin = (Element) pluginList.item(j);
           String pluginId = plugin.getAttribute("id");
-          String extId = (String) aliases.get(pluginId);
+          String extId = aliases.get(pluginId);
           if (extId == null) {
             // Assume an extension id is directly specified
             extId = pluginId;
@@ -209,16 +208,15 @@
     
     ParsePluginList prefs = reader.parse(NutchConfiguration.create());
     
-    for (Iterator i = prefs.getSupportedMimeTypes().iterator(); i.hasNext();) {
-      String mimeType = (String) i.next();
+    for (String mimeType : prefs.getSupportedMimeTypes()) {
       
       System.out.println("MIMETYPE: " + mimeType);
-      List plugList = prefs.getPluginList(mimeType);
+      List<String> plugList = prefs.getPluginList(mimeType);
       
       System.out.println("EXTENSION IDs:");
       
-      for (Iterator j = plugList.iterator(); j.hasNext();) {
-        System.out.println((String) j.next());
+      for (String j : plugList) {
+        System.out.println(j);
       }
     }
     
@@ -239,9 +237,9 @@
     fParsePluginsFile = parsePluginsFile;
   }
   
-  private Map getAliases(Element parsePluginsRoot) {
+  private Map<String, String> getAliases(Element parsePluginsRoot) {
 
-    Map aliases = new HashMap();
+    Map<String, String> aliases = new HashMap<String, String>();
     NodeList aliasRoot = parsePluginsRoot.getElementsByTagName("aliases");
 	  
     if (aliasRoot == null || (aliasRoot != null && aliasRoot.getLength() == 0)) {
Index: src/java/org/apache/nutch/parse/ParseUtil.java
===================================================================
--- src/java/org/apache/nutch/parse/ParseUtil.java	(revision 562509)
+++ src/java/org/apache/nutch/parse/ParseUtil.java	(working copy)
@@ -40,7 +40,6 @@
   
   /* our log stream */
   public static final Log LOG = LogFactory.getLog(ParseUtil.class);
-  private Configuration conf;
   private ParserFactory parserFactory;
   
   /**
@@ -48,7 +47,6 @@
    * @param conf
    */
   public ParseUtil(Configuration conf) {
-    this.conf = conf;
     this.parserFactory = new ParserFactory(conf);
   }
   
Index: src/java/org/apache/nutch/parse/OutlinkExtractor.java
===================================================================
--- src/java/org/apache/nutch/parse/OutlinkExtractor.java	(revision 562509)
+++ src/java/org/apache/nutch/parse/OutlinkExtractor.java	(working copy)
@@ -83,7 +83,7 @@
    */
   public static Outlink[] getOutlinks(final String plainText, String anchor, Configuration conf) {
     long start = System.currentTimeMillis();
-    final List outlinks = new ArrayList();
+    final List<Outlink> outlinks = new ArrayList<Outlink>();
 
     try {
       final PatternCompiler cp = new Perl5Compiler();
@@ -111,7 +111,6 @@
         result = matcher.getMatch();
         url = result.group(0);
         try {
-          Outlink outlink = new Outlink(url, anchor, conf);
           outlinks.add(new Outlink(url, anchor, conf));
         } catch (MalformedURLException mue) {
           LOG.warn("Invalid url: '" + url + "', skipping.");
@@ -126,7 +125,7 @@
 
     //create array of the Outlinks
     if (outlinks != null && outlinks.size() > 0) {
-      retval = (Outlink[]) outlinks.toArray(new Outlink[0]);
+      retval = outlinks.toArray(new Outlink[0]);
     } else {
       retval = new Outlink[0];
     }
Index: src/java/org/apache/nutch/parse/ParserFactory.java
===================================================================
--- src/java/org/apache/nutch/parse/ParserFactory.java	(revision 562509)
+++ src/java/org/apache/nutch/parse/ParserFactory.java	(working copy)
@@ -102,7 +102,7 @@
   public Parser[] getParsers(String contentType, String url)
   throws ParserNotFound {
     
-    List parsers = null;
+    List<Parser> parsers = null;
     List parserExts = null;
     
     // TODO once the MimeTypes is available
@@ -118,7 +118,7 @@
       throw new ParserNotFound(url, contentType);
     }
 
-    parsers = new Vector(parserExts.size());
+    parsers = new Vector<Parser>(parserExts.size());
     for (Iterator i=parserExts.iterator(); i.hasNext(); ){
       Extension ext = (Extension) i.next();
       Parser p = null;
@@ -142,7 +142,7 @@
         }
       }
     }
-    return (Parser[]) parsers.toArray(new Parser[]{});
+    return parsers.toArray(new Parser[]{});
   }
     
   /**
@@ -209,7 +209,7 @@
    * @return a list of extensions to be used for this contentType.
    *         If none, returns <code>null</code>.
    */
-  protected List getExtensions(String contentType) {
+  protected List<Extension> getExtensions(String contentType) {
     
     // First of all, tries to clean the content-type
     String type = null;
@@ -223,7 +223,7 @@
       type = contentType;
     }
 
-    List extensions = (List) this.conf.getObject(type);
+    List<Extension> extensions = (List<Extension>) this.conf.getObject(type);
 
     // Just compare the reference:
     // if this is the empty list, we know we will find no extension.
@@ -253,13 +253,15 @@
    * @return List - List of extensions to be used for this contentType.
    *                If none, returns null.
    */
-  private List findExtensions(String contentType) {
+  private List<Extension> findExtensions(String contentType) {
     
     Extension[] extensions = this.extensionPoint.getExtensions();
     
     // Look for a preferred plugin.
-    List parsePluginList = this.parsePluginList.getPluginList(contentType);
-    List extensionList = matchExtensions(parsePluginList, extensions, contentType);
+    List<String> parsePluginList = 
+      this.parsePluginList.getPluginList(contentType);
+    List<Extension> extensionList = 
+      matchExtensions(parsePluginList, extensions, contentType);
     if (extensionList != null) {
       return extensionList;
     }
@@ -283,15 +285,14 @@
    * @return List - List of extensions to be used for this contentType.
    *                If none, returns null.
    */
-  private List matchExtensions(List plugins,
+  private List<Extension> matchExtensions(List<String> plugins,
                                Extension[] extensions,
                                String contentType) {
     
-    List extList = new ArrayList();
+    List<Extension> extList = new ArrayList<Extension>();
     if (plugins != null) {
       
-      for (Iterator i = plugins.iterator(); i.hasNext();) {
-        String parsePluginId = (String) i.next();
+      for (String parsePluginId : plugins) {
         
         Extension ext = getExtension(extensions, parsePluginId, contentType);
         // the extension returned may be null
@@ -346,7 +347,7 @@
         if (extensions[i].getAttribute("contentType") != null
             && extensions[i].getAttribute("contentType").equals(
                 contentType)) {
-          extList.add(extensions[i].getId());
+          extList.add(extensions[i]);
         }
       }
       
@@ -393,7 +394,7 @@
   }
   
   private Extension getExtensionFromAlias(Extension[] list, String id) {
-    return getExtension(list, (String) parsePluginList.getAliases().get(id));
+    return getExtension(list, parsePluginList.getAliases().get(id));
   }
 
 }
Index: src/java/org/apache/nutch/parse/ParsePluginList.java
===================================================================
--- src/java/org/apache/nutch/parse/ParsePluginList.java	(revision 562509)
+++ src/java/org/apache/nutch/parse/ParsePluginList.java	(working copy)
@@ -35,37 +35,37 @@
 class ParsePluginList {
   
   /* a map to link mimeType to an ordered list of parsing plugins */
-  private Map fMimeTypeToPluginMap = null;
+  private Map<String, List<String>> fMimeTypeToPluginMap = null;
   
   /* A list of aliases */
-  private Map aliases = null;
+  private Map<String, String> aliases = null;
   
   
   /**
    * Constructs a new ParsePluginList
    */
   ParsePluginList() {
-    fMimeTypeToPluginMap = new HashMap();
-    aliases = new HashMap();
+    fMimeTypeToPluginMap = new HashMap<String, List<String>>();
+    aliases = new HashMap<String, String>();
   }
   
-  List getPluginList(String mimeType) {
-    return (List) fMimeTypeToPluginMap.get(mimeType);
+  List<String> getPluginList(String mimeType) {
+    return fMimeTypeToPluginMap.get(mimeType);
   }
 
-  void setAliases(Map aliases) {
+  void setAliases(Map<String, String> aliases) {
     this.aliases = aliases;
   }
   
-  Map getAliases() {
+  Map<String, String> getAliases() {
     return aliases;
   }
   
-  void setPluginList(String mimeType, List l) {
+  void setPluginList(String mimeType, List<String> l) {
     fMimeTypeToPluginMap.put(mimeType, l);
   }
   
-  List getSupportedMimeTypes() {
+  List<String> getSupportedMimeTypes() {
     return Arrays.asList(fMimeTypeToPluginMap.keySet().toArray(
             new String[] {}));
   }
Index: src/java/org/apache/nutch/parse/HtmlParseFilters.java
===================================================================
--- src/java/org/apache/nutch/parse/HtmlParseFilters.java	(revision 562509)
+++ src/java/org/apache/nutch/parse/HtmlParseFilters.java	(working copy)
@@ -22,7 +22,6 @@
 import org.apache.nutch.protocol.Content;
 import org.apache.nutch.plugin.*;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
 
 import org.w3c.dom.DocumentFragment;
 
@@ -34,7 +33,8 @@
   public HtmlParseFilters(Configuration conf) {
         this.htmlParseFilters = (HtmlParseFilter[]) conf.getObject(HtmlParseFilter.class.getName());
         if (htmlParseFilters == null) {
-            HashMap filters = new HashMap();
+            HashMap<String, HtmlParseFilter> filters = 
+              new HashMap<String, HtmlParseFilter>();
             try {
                 ExtensionPoint point = PluginRepository.get(conf).getExtensionPoint(HtmlParseFilter.X_POINT_ID);
                 if (point == null)
@@ -47,7 +47,7 @@
                         filters.put(parseFilter.getClass().getName(), parseFilter);
                     }
                 }
-                HtmlParseFilter[] htmlParseFilters = (HtmlParseFilter[]) filters.values().toArray(new HtmlParseFilter[filters.size()]);
+                HtmlParseFilter[] htmlParseFilters = filters.values().toArray(new HtmlParseFilter[filters.size()]);
                 conf.setObject(HtmlParseFilter.class.getName(), htmlParseFilters);
             } catch (PluginRuntimeException e) {
                 throw new RuntimeException(e);
Index: src/java/org/apache/nutch/parse/ParseException.java
===================================================================
--- src/java/org/apache/nutch/parse/ParseException.java	(revision 562509)
+++ src/java/org/apache/nutch/parse/ParseException.java	(working copy)
@@ -17,6 +17,7 @@
 
 package org.apache.nutch.parse;
 
+@SuppressWarnings("serial")
 public class ParseException extends Exception {
 
   public ParseException() {
Index: src/java/org/apache/nutch/util/ThreadPool.java
===================================================================
--- src/java/org/apache/nutch/util/ThreadPool.java	(revision 562509)
+++ src/java/org/apache/nutch/util/ThreadPool.java	(working copy)
@@ -58,7 +58,7 @@
 
     int numThreads;
     boolean running = false;
-    Vector jobs;
+    Vector<Runnable> jobs;
 
     /**
      * Creates a pool of numThreads size.
@@ -67,7 +67,7 @@
      */
     public ThreadPool(int numThreads) {
         this.numThreads = numThreads;
-        jobs = new Vector(37);
+        jobs = new Vector<Runnable>(37);
         running = true;
 
         for (int i = 0; i < numThreads; i++) {
@@ -99,7 +99,7 @@
                 }
 
                 if (jobs.size() > 0) {
-                    job = (Runnable) jobs.firstElement();
+                    job = jobs.firstElement();
                     jobs.removeElementAt(0);
                 }
             }
Index: src/java/org/apache/nutch/util/mime/MimeTypesReader.java
===================================================================
--- src/java/org/apache/nutch/util/mime/MimeTypesReader.java	(revision 562509)
+++ src/java/org/apache/nutch/util/mime/MimeTypesReader.java	(working copy)
@@ -89,7 +89,7 @@
     
     /** Read Element named mime-types. */
     private MimeType[] readMimeTypes(Element element) {
-        ArrayList types = new ArrayList();
+        ArrayList<MimeType> types = new ArrayList<MimeType>();
         NodeList nodes = element.getChildNodes();
         for (int i=0; i<nodes.getLength(); i++) {
             Node node = nodes.item(i);
@@ -101,7 +101,7 @@
                 }
             }
         }
-        return (MimeType[]) types.toArray(new MimeType[types.size()]);
+        return types.toArray(new MimeType[types.size()]);
     }
     
     /** Read Element named mime-type. */
Index: src/java/org/apache/nutch/util/mime/MimeTypeException.java
===================================================================
--- src/java/org/apache/nutch/util/mime/MimeTypeException.java	(revision 562509)
+++ src/java/org/apache/nutch/util/mime/MimeTypeException.java	(working copy)
@@ -22,6 +22,7 @@
  * @author Hari Kodungallur
  * @author Jerome Charron - http://frutch.free.fr/
  */
+@SuppressWarnings("serial")
 public class MimeTypeException extends Exception {
 
     /**
Index: src/java/org/apache/nutch/util/mime/MimeType.java
===================================================================
--- src/java/org/apache/nutch/util/mime/MimeType.java	(revision 562509)
+++ src/java/org/apache/nutch/util/mime/MimeType.java	(working copy)
@@ -51,10 +51,10 @@
     private String description = null;
     
     /** The Mime-Type associated extensions */
-    private ArrayList extensions = null;
+    private ArrayList<String> extensions = null;
     
     /** The magic bytes associated to this Mime-Type */
-    private ArrayList magics = null;
+    private ArrayList<Magic> magics = null;
     
     /** The minimum length of data to provides for magic analyzis */
     private int minLength = 0;
@@ -109,8 +109,8 @@
         this.name = primary + SEPARATOR + clearedSub;
         this.primary = primary;
         this.sub = clearedSub;
-        this.extensions = new ArrayList();
-        this.magics = new ArrayList();
+        this.extensions = new ArrayList<String>();
+        this.magics = new ArrayList<Magic>();
     }
 
     /**
@@ -209,7 +209,7 @@
      * @return the extensions associated to this mime-type.
      */
     String[] getExtensions() {
-        return (String[]) extensions.toArray(new String[extensions.size()]);
+        return extensions.toArray(new String[extensions.size()]);
     }
     
     void addMagic(int offset, String type, String magic) {
@@ -247,7 +247,7 @@
         
         Magic tested = null;
         for (int i=0; i<magics.size(); i++) {
-            tested = (Magic) magics.get(i);
+            tested = magics.get(i);
             if (tested.matches(data)) {
                 return true;
             }
Index: src/java/org/apache/nutch/util/mime/MimeTypes.java
===================================================================
--- src/java/org/apache/nutch/util/mime/MimeTypes.java	(revision 562509)
+++ src/java/org/apache/nutch/util/mime/MimeTypes.java	(working copy)
@@ -42,16 +42,18 @@
     public final static String DEFAULT = "application/octet-stream";
 
     /** All the registered MimeTypes */
-    private ArrayList types = new ArrayList();
+    private ArrayList<MimeType> types = new ArrayList<MimeType>();
 
     /** All the registered MimeType indexed by name */
-    private HashMap typesIdx = new HashMap();
+    private HashMap<String, MimeType> typesIdx = 
+      new HashMap<String, MimeType>();
 
     /** MimeTypes indexed on the file extension */
-    private Map extIdx = new HashMap();
+    private Map<String, List<MimeType>> extIdx = 
+      new HashMap<String, List<MimeType>>();
 
     /** List of MimeTypes containing a magic char sequence */
-    private List magicsIdx = new ArrayList();
+    private List<MimeType> magicsIdx = new ArrayList<MimeType>();
 
     /** The minimum length of data to provide to check all MimeTypes */
     private int minLength = 0;
@@ -63,7 +65,7 @@
      * Key is the specified file path in the {@link #get(String)} method.
      * Value is the associated MimeType instance.
      */
-    private static Map instances = new HashMap();
+    private static Map<String, MimeTypes> instances = new HashMap<String, MimeTypes>();
     
     
     /** Should never be instanciated from outside */
@@ -81,7 +83,7 @@
     public static MimeTypes get(String filepath) {
         MimeTypes instance = null;
         synchronized(instances) {
-            instance = (MimeTypes) instances.get(filepath);
+            instance = instances.get(filepath);
             if (instance == null) {
                 instance = new MimeTypes(filepath, null);
                 instances.put(filepath, instance);
@@ -99,7 +101,7 @@
     public static MimeTypes get(String filepath, Log logger) {
         MimeTypes instance = null;
         synchronized(instances) {
-            instance = (MimeTypes) instances.get(filepath);
+            instance = instances.get(filepath);
             if (instance == null) {
                 instance = new MimeTypes(filepath, logger);
                 instances.put(filepath, instance);
@@ -164,7 +166,7 @@
         if ((data == null) || (data.length < 1)) {
             return null;
         }
-        Iterator iter = magicsIdx.iterator();
+        Iterator<MimeType> iter = magicsIdx.iterator();
         MimeType type = null;
         // TODO: This is a very naive first approach (scanning all the magic
         //       bytes since one is matching.
@@ -173,7 +175,7 @@
         // TODO: A second improvement could be to search for the most qualified
         //       (the longuest) magic sequence (not the first that is matching).
         while (iter.hasNext()) {
-            type = (MimeType) iter.next();
+            type = iter.next();
             if (type.matches(data)) {
                 return type;
             }
@@ -214,7 +216,7 @@
     * Return a MimeType from its name.
     */
    public MimeType forName(String name) {
-      return (MimeType) typesIdx.get(name);
+      return typesIdx.get(name);
    }
 
     /**
@@ -254,11 +256,11 @@
         String[] exts = type.getExtensions();
         if (exts != null) {
             for (int i=0; i<exts.length; i++) {
-                List list = (List) extIdx.get(exts[i]);
+                List<MimeType> list = extIdx.get(exts[i]);
                 if (list == null) {
                     // No type already registered for this extension...
                     // So, create a list of types
-                    list = new ArrayList();
+                    list = new ArrayList<MimeType>();
                     extIdx.put(exts[i], list);
                 }
                 list.add(type);
@@ -275,17 +277,17 @@
      * (many MimeTypes can have the same registered extensions).
      */
     private MimeType[] getMimeTypes(String name) {
-        List mimeTypes = null;
+        List<MimeType> mimeTypes = null;
         int index = name.lastIndexOf('.');
         if ((index != -1) && (index != name.length()-1)) {
             // There's an extension, so try to find
             // the corresponding mime-types
             String ext = name.substring(index + 1);
-            mimeTypes = (List) extIdx.get(ext);
+            mimeTypes = extIdx.get(ext);
         }
         
         return (mimeTypes != null)
-                    ? (MimeType[]) mimeTypes.toArray(new MimeType[mimeTypes.size()])
+                    ? mimeTypes.toArray(new MimeType[mimeTypes.size()])
                     : null;
     }
     
Index: src/java/org/apache/nutch/util/FibonacciHeap.java
===================================================================
--- src/java/org/apache/nutch/util/FibonacciHeap.java	(revision 562509)
+++ src/java/org/apache/nutch/util/FibonacciHeap.java	(working copy)
@@ -31,7 +31,7 @@
  */
 public class FibonacciHeap {
   private FibonacciHeapNode min;
-  private HashMap itemsToNodes;
+  private HashMap<Object, FibonacciHeapNode> itemsToNodes;
 
   // private node class
   private static class FibonacciHeapNode {
@@ -67,7 +67,7 @@
    */
   public FibonacciHeap() {
     this.min= null;
-    this.itemsToNodes= new HashMap();
+    this.itemsToNodes= new HashMap<Object, FibonacciHeapNode>();
   }
 
   /**
@@ -230,7 +230,7 @@
    */
   public void decreaseKey(Object item, int priority) {
     FibonacciHeapNode node= 
-      (FibonacciHeapNode) itemsToNodes.get(item);
+      itemsToNodes.get(item);
     if (node == null) 
       throw new IllegalStateException("No such element: " + item);
     if (node.priority < priority) 
Index: src/java/org/apache/nutch/util/NodeWalker.java
===================================================================
--- src/java/org/apache/nutch/util/NodeWalker.java	(revision 562509)
+++ src/java/org/apache/nutch/util/NodeWalker.java	(working copy)
@@ -17,7 +17,6 @@
 public class NodeWalker {
 
   // the root node the the stack holding the nodes
-  private Node rootNode;
   private Node currentNode;
   private NodeList currentChildren;
   private Stack<Node> nodes;
@@ -28,7 +27,6 @@
    * @param rootNode
    */
   public NodeWalker(Node rootNode) {
-    this.rootNode = rootNode;
 
     nodes = new Stack<Node>();
     nodes.add(rootNode);
Index: src/java/org/apache/nutch/util/TrieStringMatcher.java
===================================================================
--- src/java/org/apache/nutch/util/TrieStringMatcher.java	(revision 562509)
+++ src/java/org/apache/nutch/util/TrieStringMatcher.java	(working copy)
@@ -37,9 +37,9 @@
   /**
    * Node class for the character tree.
    */
-  protected class TrieNode implements Comparable {
+  protected class TrieNode implements Comparable<TrieNode> {
     protected TrieNode[] children;
-    protected LinkedList childrenList;
+    protected LinkedList<TrieNode> childrenList;
     protected char nodeChar;
     protected boolean terminal;
 
@@ -52,7 +52,7 @@
     TrieNode(char nodeChar, boolean isTerminal) {
       this.nodeChar= nodeChar;
       this.terminal= isTerminal;
-      this.childrenList= new LinkedList();
+      this.childrenList= new LinkedList<TrieNode>();
     }
 
     /**
@@ -71,7 +71,7 @@
      */
     TrieNode getChildAddIfNotPresent(char nextChar, boolean isTerminal) {
       if (childrenList == null) {
-        childrenList= new LinkedList();
+        childrenList= new LinkedList<TrieNode>();
         childrenList.addAll(Arrays.asList(children));
         children= null;
       }
@@ -82,10 +82,10 @@
         return newNode;
       }
 
-      ListIterator iter= childrenList.listIterator();
-      TrieNode node= (TrieNode) iter.next();
+      ListIterator<TrieNode> iter= childrenList.listIterator();
+      TrieNode node= iter.next();
       while ( (node.nodeChar < nextChar) && iter.hasNext() ) 
-        node= (TrieNode) iter.next();
+        node= iter.next();
                         
       if (node.nodeChar == nextChar) {
         node.terminal= node.terminal | isTerminal;
@@ -107,8 +107,7 @@
      */
     TrieNode getChild(char nextChar) {
       if (children == null) {
-        children= (TrieNode[]) 
-          childrenList.toArray(new TrieNode[childrenList.size()]);
+        children= childrenList.toArray(new TrieNode[childrenList.size()]);
         childrenList= null;
         Arrays.sort(children);
       }
@@ -133,8 +132,7 @@
       return null;
     }
 
-    public int compareTo(Object o) {
-      TrieNode other= (TrieNode) o;
+    public int compareTo(TrieNode other) {
       if (this.nodeChar < other.nodeChar) 
         return -1;
       if (this.nodeChar == other.nodeChar) 
