carfield.com.hk default.dfPackage    2000-06-13T16:00:00Z

ATN.class    2000-06-12T16:00:00Z

ATN.java    2000-06-12T16:00:00Z

/**
 * Title:        ATN<p>
 * Description:  Implements a simple ATN parser that uses WordNet data
 * Copyright:    Copyright (c) by Mark Watson, 2000<p>
 * @author Mark Watson
 * @version 1.2
 */

import java.io.*;
import java.util.*;

public class ATN {
    public ATN() {
        try {
            // the following code will read either a local file or a
            // resource in a JAR file:
            InputStream ins = ClassLoader.getSystemResourceAsStream("wncache.dat");
            if (ins == null) {
                System.out.println("Failed to open 'wncache.dat'");
                System.exit(1);
            } else {
                ObjectInputStream p = new ObjectInputStream(ins);
                adj  = (Hashtable) p.readObject();
                adv  = (Hashtable) p.readObject();
                noun = (Hashtable) p.readObject();
                verb = (Hashtable) p.readObject();
                ins.close();
            }
            // Augment the WordNet 1.6 entries:
            art = new Hashtable();
            addWords(art, ARTS);
            conj = new Hashtable();
            addWords(conj, CONJS);
            det = new Hashtable();
            addWords(det, DETS);
            pron = new Hashtable();
            addWords(pron, PRONS);
            prep = new Hashtable();
            addWords(prep, PREPS);
            // fill in a few common verbs that are not in WordNet 1.6:
            verb.put("ran", b);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private Boolean b = new Boolean(true);

    private void addWords(Hashtable h, String[] ws) {
        for (int i = 0; i < ws.length; i++) {
            h.put(ws[i], b);
        }
    }

    Hashtable adj, adv, art, conj, det, noun, pron, verb, prep;

    String[] PRONS = {"he", "she", "me", "it", "you", "I"};
    String[] ARTS  = {"the", "a", "an"};
    String[] CONJS = {"and", "or"};
    String[] DETS  = {"who", "what", "where", "when"};
    String[] PREPS = {"on", "at", "under", "above", "behind", "to", "about", "down"};

    private boolean checkWord(String word, int type) {
        if (type == PREP) {
            if (prep.get(word) != null) return true;
        } else if (type == VERB) {
            if (verb.get(word) != null) return true;
            // some simple kluges to accept words like "likes" when
            // only "like" is in the lexicon:
            if (word.endsWith("s") || word.endsWith("ed")) {
                String s = word.substring(0, word.length() - 1);
                if (verb.get(s) != null) return true;
            }
        } else if (type == NOUN) {
            if (noun.get(word) != null) return true;
        } else if (type == CONJ) {
            if (conj.get(word) != null) return true;
        } else if (type == ADJ) {
            if (adj.get(word) != null) return true;
        } else if (type == ADV) {
            if (adv.get(word) != null) return true;
        } else if (type == PRON) {
            if (pron.get(word) != null) return true;
        } else if (type == DET) {
            if (det.get(word) != null) return true;
        } else if (type == ART) {
            if (art.get(word) != null) return true;
        }
        return false;
    }
    public int[] parse(String s) {
        Vector v = new Vector();
        StringTokenizer st = new StringTokenizer(s);
        while (st.hasMoreTokens()) {
            String str = st.nextToken();
            if (str.length() > 2 && str.endsWith(",")) {
                str = str.substring(0, str.length() - 1);
            }
            if (str.length() > 2 && str.endsWith(".")) {
                str = str.substring(0, str.length() - 1);
            }
            if (str.length() > 2 && str.endsWith(":")) {
                str = str.substring(0, str.length() - 1);
            }
            if (str.length() > 2 && str.endsWith(";")) {
                str = str.substring(0, str.length() - 1);
            }
            v.addElement(str.toLowerCase());
        }
        // It is easier to work with an array, so convert the Vector
        // to an array of Java strings:
        int size = v.size();
        if (size == 0) return null;
        words = new String[size];
        partsOfSpeech = new int[size];
        num_words = size;
        for (int i = 0; i < size; i++) words[i] = (String) v.elementAt(i);
        // quick test against lexicon for word types:
        for (int i = 0; i < words.length; i++) {
            System.out.print("'" + words[i] + "' possible word types: ");
            if (adj.get(words[i])  != null) System.out.print("adj ");
            if (adv.get(words[i])  != null) System.out.print("adv ");
            if (art.get(words[i])  != null) System.out.print("art ");
            if (noun.get(words[i]) != null) System.out.print("noun ");
            if (prep.get(words[i]) != null) System.out.print("prep ");
            if (verb.get(words[i]) != null) System.out.print("verb ");
            System.out.println();
        }
        System.out.println();
        // execute the parsing helper methods until one succeeds:
        parse_it();
        return null;
    }

    String[] words;
    int[] partsOfSpeech;
    int wordIndex;
    int num_words;

    public static void main(String[] args) {
        ATN nf = new ATN();
        if (args.length < 1) {
            nf.parse("the dog ran down the street");
        } else {
            for (int i = 0; i < args.length; i++) {
                System.out.println("\nProcessing : " + args[i]);
                nf.parse(args[i]);
            }
        }
    }

    //////////////// ATN functions:

    String getPOSname(int pos) {
        switch (pos) {
            case 1:    return "NP";
            case 2:    return "VP";
            case 3:    return "PP";
            case 1001: return "noun";
            case 1002: return "verb";
            case 1003: return "prep";
            case 1004: return "conj";
            case 1005: return "adj";
            case 1006: return "adv";
            case 1007: return "pron";
            case 1008: return "det";
            case 1009: return "art";
            default:   return "unknown";
        }
    }

    public final static int NP = 1;
    public final static int VP = 2;
    public final static int PP = 3;
    public final static int NOUN = 1001;
    public final static int VERB = 1002;
    public final static int PREP = 1003;
    public final static int CONJ = 1004;
    public final static int ADJ  = 1005;
    public final static int ADV  = 1006;
    public final static int PRON = 1007;
    public final static int DET  = 1008;
    public final static int ART  = 1009;

    public final static int NUM_S = 9;
    // int [] LEN_S = { 5, 4, 3, 4, 3, 2, 2, 2, 1};
    int[][] ALL_S = {
        {NP, VP, NP, PP, VP},
        {NP, VP, PP, NP},
        {NP, VP, NP},
        {VP, NP, PP, NP},
        {VP, PP, NP},
        {NP, VP},
        {VP, PP},
        {VP, NP},
        {VP}
    };

    //////////////// The actual parser:

    int parsePP(int start_word_index, int word_index) {
        if (word_index >= num_words) return word_index;
        // test ATN transitions <PREP> --> <NP>
        if (checkWord(words[word_index], PREP)) {
            partsOfSpeech[start_word_index + word_index] = PREP;
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                return ii;
            }
        }
        return -1;
    }
    int parseNP(int start_word_index, int word_index) {
        if (word_index >= num_words) return word_index;
        // test ATN transitions <NOUN> --> <CONJ> --> <NP>
        if (word_index < num_words - 2 && checkWord(words[word_index], NOUN)) {
            if (checkWord(words[word_index + 1], CONJ)) {
                int ii = parseNP(start_word_index, word_index + 2);
                if (ii > -1) {
                    partsOfSpeech[start_word_index + word_index] = NOUN;
                    partsOfSpeech[start_word_index + word_index + 1] = CONJ;
                    return ii;
                }
            }
        }
        // test ATN transitions <ART> --> <NP>
        if (word_index < num_words - 1 && checkWord(words[word_index], ART)) {
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                partsOfSpeech[start_word_index + word_index] = ART;
                return ii;
            }
        }
        // test ATN transitions <ADJ> --> <NP> (with a following word)
        if (word_index < num_words - 1 && checkWord(words[word_index], ADJ)) {
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                partsOfSpeech[start_word_index + word_index] = ADJ;
                return ii;
            }
        }
        // test ATN transitions <ADJ> --> <NP>
        if (checkWord(words[word_index], ADJ)) {
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                partsOfSpeech[start_word_index + word_index] = ADJ;
                return ii;
            }
        }
        // test ATN transitions <ADV> --> <NP>
        if (word_index < num_words - 1 && checkWord(words[word_index], ADV)) {
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                partsOfSpeech[start_word_index + word_index] = ADV;
                return ii;
            }
        }
        // test ATN transitions <NOUN> --> <NOUN>
        if (word_index < num_words - 1 && checkWord(words[word_index], NOUN)) {
            if (checkWord(words[word_index + 1], NOUN)) {
                partsOfSpeech[start_word_index + word_index] = NOUN;
                partsOfSpeech[start_word_index + word_index + 1] = NOUN;
                return word_index + 2;
            }
        }
        if (checkWord(words[word_index], NOUN)) {
            partsOfSpeech[start_word_index + word_index] = NOUN;
            return word_index + 1;
        }
        if (checkWord(words[word_index], PRON)) {
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                partsOfSpeech[start_word_index + word_index] = PRON;
                return ii;
            }
        }
        if (checkWord(words[word_index], PRON)) {
            partsOfSpeech[start_word_index + word_index] = PRON;
            return word_index + 1;
        }
        return -1;
    }

    int parseVP(int start_word_index, int word_index) {
        if (word_index >= num_words) return word_index;
        // test ATN transitions <V> --> <NP> --> <PP>
        if (checkWord(words[word_index], VERB)) {
            partsOfSpeech[start_word_index + word_index] = VERB;
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                int jj = parsePP(start_word_index, ii);
                if (jj > -1) {
                    return jj;
                }
            }
        }
        // test ATN transitions <V> --> <NP>
        if (checkWord(words[word_index], VERB)) {
            partsOfSpeech[start_word_index + word_index] = VERB;
            int ii = parseNP(start_word_index, word_index + 1);
            if (ii > -1) {
                return ii;
            }
        }
        // test ATN transitions <V> --> <PP>
        if (checkWord(words[word_index], VERB)) {
            partsOfSpeech[start_word_index + word_index] = VERB;
            int ii = parsePP(start_word_index, word_index + 1);
            if (ii > -1) {
                return ii;
            }
        }
        if (checkWord(words[word_index], VERB)) {
            partsOfSpeech[start_word_index + word_index] = VERB;
            return word_index + 1;
        }
        return -1;
    }

    int parseHelper(int[] atn, int start_word_index) {
        int word_index = 0;
        int len_atn = atn.length;
        int last_word_index = word_index;
        for (int i = 0; i < len_atn; i++) {
            last_word_index = word_index;
            switch (atn[i]) {
                case NP: word_index = parseNP(start_word_index, word_index); break;
                case VP: word_index = parseVP(start_word_index, word_index); break;
                case PP: word_index = parsePP(start_word_index, word_index); break;
            }
            if (word_index == -1) return last_word_index;
        }
        return word_index;
    }
    int parseSentence(int start_word_index) {
        int max_val = -1;
        int max_word_index = 0;
        for (int i = 0; i < NUM_S; i++) {
            int k = parseHelper(ALL_S[i], start_word_index);
            //System.out.println("Score for ATN " + i + " is " + k);
            if (k > max_val) {
                max_val = k;
                max_word_index = i;
            }
        }
        System.out.println("Best ATN at word_index " + max_word_index);
        parseHelper(ALL_S[max_word_index], start_word_index);
        for (int i = 0; i < num_words; i++) {
            if (partsOfSpeech[start_word_index + i] == 0) {
                if (checkWord(words[i], NOUN)) {
                    partsOfSpeech[start_word_index + i] = NOUN;
                }
                if (checkWord(words[i], CONJ)) {
                    partsOfSpeech[start_word_index + i] = CONJ;
                }
            }
        }
        return max_val;
    }

    void parse_it() {
        int word_index = parseSentence(0);
        //System.out.println("word_index from S ATN = " + word_index);
        for (int i = 0; i < num_words; i++) {
            System.out.println("  word: " + words[i] +
                               "  part of speech: " + getPOSname(partsOfSpeech[i]));
        }
    }
}
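A minimal usage sketch (not part of the original package): the class name ATNDemo is hypothetical, and it assumes that ATN.class and wncache.dat are both on the classpath. It simply drives the parser the same way ATN's own main() does.

// Hypothetical demo class; ATN.class and wncache.dat must be on the classpath.
public class ATNDemo {
    public static void main(String[] args) {
        ATN parser = new ATN();   // the constructor loads wncache.dat
        // parse() prints each word's candidate types, the best-scoring
        // sentence-level ATN, and the final part-of-speech tag for every
        // word; it currently returns null rather than the tag array.
        parser.parse("the dog ran down the street");
    }
}

Running "java ATN" with no arguments exercises the same built-in example sentence; passing one or more quoted sentences on the command line parses each in turn.

MakeWordNetCache.class    2000-06-12T16:00:00Z

MakeWordNetCache.java    2000-06-12T16:00:00Z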
/**
 * Title:        MakeWordNetCache<p>
 * Description:  Reads WordNet 1.6 index files and makes a part of speech
 *               serialized file consisting of 4 Java hash tables (for adj,
 *               adv, noun, and verb).
 * Copyright:    Copyright (c) by Mark Watson, 2000<p>
 * @author Mark Watson
 * @version 1.1
 */

/** WordNet 1.6 Copyright and License:

 This software and database is being provided to you, the LICENSEE, by
 Princeton University under the following license.  By obtaining, using
 and/or copying this software and database, you agree that you have
 read, understood, and will comply with these terms and conditions.:

 Permission to use, copy, modify and distribute this software and
 database and its documentation for any purpose and without fee or
 royalty is hereby granted, provided that you agree to comply with
 the following copyright notice and statements, including the disclaimer,
 and that the same appear on ALL copies of the software, database and
 documentation, including modifications that you make for internal
 use or for distribution.

 WordNet 1.6 Copyright 1997 by Princeton University.  All rights reserved.

 THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON
 UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
 IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON
 UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANT-
 ABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE
 OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT
 INFRINGE ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR
 OTHER RIGHTS.

 The name of Princeton University or Princeton may not be used in
 advertising or publicity pertaining to distribution of the software
 and/or database.  Title to copyright in this software, database and
 any associated documentation shall at all times remain with
 Princeton University and LICENSEE agrees to preserve same.
 */

import java.io.*;
import java.util.*;

public class MakeWordNetCache {
    Hashtable adj  = new Hashtable();
    Hashtable adv  = new Hashtable();
    Hashtable noun = new Hashtable();
    Hashtable verb = new Hashtable();

    public MakeWordNetCache() {
        helper("index.adj", adj);
        helper("index.adv", adv);
        helper("index.noun", noun);
        helper("index.verb", verb);
        //System.out.println(verb.get("run"));
        try {
            FileOutputStream ostream = new FileOutputStream("wncache.dat");
            ObjectOutputStream p = new ObjectOutputStream(ostream);
            p.writeObject(adj);
            p.writeObject(adv);
            p.writeObject(noun);
            p.writeObject(verb);
            p.flush();
            ostream.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    Boolean t = new Boolean(true);

    public void helper(String file, Hashtable hash) {
        int count = 0;
        try {
            FileReader fr = new FileReader(file);
            BufferedReader br = new BufferedReader(fr);
            // skip copyright notice:
            for (int i = 0; i < 30; i++) br.readLine();
            while (true) {
                String line = br.readLine();
                if (line == null) break;
                line = line.trim();
                int index1 = line.indexOf(" ");
                if (index1 == -1) continue;
                line = line.substring(0, index1);
                int index2 = line.indexOf(".");
                if (index2 != -1) continue;
                index2 = line.indexOf("_");
                if (index2 != -1) continue;
                line = line.toLowerCase();
                Object o = hash.get(line);
                if (o == null) {
                    hash.put(line, t);
                    //System.out.println(file + " : " + line);
                    count++;
                }
            }
            System.out.println("" + count + " words added for " + file);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        MakeWordNetCache MakeWordNetCache1 = new MakeWordNetCache();
    }
}

wncache.dat    2000-05-12T16:00:00Z
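To regenerate wncache.dat, run "java MakeWordNetCache" in a directory containing the WordNet 1.6 files index.adj, index.adv, index.noun, and index.verb; it writes wncache.dat to the current directory. The sketch below is an assumption, not part of the original package (the class name CheckWordNetCache is hypothetical): it reloads the cache with the same calls ATN's constructor uses, which can confirm that the four hash tables were serialized in the expected order (adj, adv, noun, verb).

// Hypothetical sanity check for a freshly built wncache.dat.
import java.io.*;
import java.util.*;

public class CheckWordNetCache {
    public static void main(String[] args) throws Exception {
        // load the cache from the classpath, as ATN's constructor does:
        InputStream ins = ClassLoader.getSystemResourceAsStream("wncache.dat");
        if (ins == null) {
            System.out.println("wncache.dat not found on the classpath");
            return;
        }
        ObjectInputStream p = new ObjectInputStream(ins);
        Hashtable adj  = (Hashtable) p.readObject();
        Hashtable adv  = (Hashtable) p.readObject();
        Hashtable noun = (Hashtable) p.readObject();
        Hashtable verb = (Hashtable) p.readObject();
        ins.close();
        System.out.println("adj: " + adj.size() + ", adv: " + adv.size()
            + ", noun: " + noun.size() + ", verb: " + verb.size());
    }
}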