// TextSnippet.java
// -----------------
// (C) by Michael Peter Christen; mc@yacy.net
// first published on http://www.anomic.de
// Frankfurt, Germany, 2005
//
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

package de.anomic.search;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import net.yacy.cora.storage.ARC;
import net.yacy.cora.storage.ConcurrentARC;
import net.yacy.document.Condenser;
import net.yacy.document.Document;
import net.yacy.document.Parser;
import net.yacy.document.SnippetExtractor;
import net.yacy.document.parser.html.CharacterCoding;
import net.yacy.kelondro.data.meta.DigestURI;
import net.yacy.kelondro.data.meta.URIMetadataRow;
import net.yacy.kelondro.data.word.Word;
import net.yacy.kelondro.index.HandleSet;
import net.yacy.kelondro.order.Base64Order;
import net.yacy.kelondro.util.ByteArray;
import net.yacy.repository.LoaderDispatcher;
import de.anomic.crawler.CrawlProfile;
import de.anomic.crawler.retrieval.Response;
import de.anomic.yacy.yacySearch;

public class TextSnippet implements Comparable<TextSnippet>, Comparator<TextSnippet> {

    private static final int maxCache = 1000;

    public static final int SOURCE_CACHE = 0;
    public static final int SOURCE_FILE = 1;
    public static final int SOURCE_WEB = 2;
    public static final int SOURCE_METADATA = 3;

    public static final int ERROR_NO_HASH_GIVEN = 11;
    public static final int ERROR_SOURCE_LOADING = 12;
    public static final int ERROR_RESOURCE_LOADING = 13;
    public static final int ERROR_PARSER_FAILED = 14;
    public static final int ERROR_PARSER_NO_LINES = 15;
    public static final int ERROR_NO_MATCH = 16;

    /**
     * \\A[^\\p{L}\\p{N}].+
     */
    private final static Pattern p1 = Pattern.compile("\\A[^\\p{L}\\p{N}].+");
    /**
     * .+[^\\p{L}\\p{N}]\\Z
     */
    private final static Pattern p2 = Pattern.compile(".+[^\\p{L}\\p{N}]\\Z");
    /**
     * \\A[\\p{L}\\p{N}]+[^\\p{L}\\p{N}].+\\Z
     */
    private final static Pattern p3 = Pattern.compile("\\A[\\p{L}\\p{N}]+[^\\p{L}\\p{N}].+\\Z");
    /**
     * [^\\p{L}\\p{N}]
     */
    private final static Pattern p4 = Pattern.compile("[^\\p{L}\\p{N}]");
    /**
     * (.*?)(\\<b\\>.+?\\</b\\>)(.*)
     */
    private final static Pattern p01 = Pattern.compile("(.*?)(\\<b\\>.+?\\</b\\>)(.*)"); // marked words are in <b>-tags

    public static class Cache {
        private final ARC<String, String> cache;

        public Cache() {
            cache = new ConcurrentARC<String, String>(maxCache, Math.max(10, Runtime.getRuntime().availableProcessors()));
        }

        public void put(final String wordhashes, final String urlhash, final String snippet) {
            // generate key
            String key = urlhash + wordhashes;

            // do nothing if snippet is known
            if (cache.containsKey(key)) return;

            // learn new snippet
            cache.put(key, snippet);
        }

        public String get(final String wordhashes, final String urlhash) {
            // generate key
            final String key = urlhash + wordhashes;
            return cache.get(key);
        }

        public boolean contains(final String wordhashes, final String urlhash) {
            return cache.containsKey(urlhash + wordhashes);
        }
    }
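
    /**
     * Shared snippet cache, keyed by the concatenation of url hash and word hashes.
     * A minimal usage sketch (the variable names are placeholders, not values defined here):
     * <pre>
     *   TextSnippet.snippetsCache.put(wordhashes, urlhash, line);             // remember a computed snippet
     *   String cached = TextSnippet.snippetsCache.get(wordhashes, urlhash);   // null if nothing was cached
     * </pre>
     */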
    public static final Cache snippetsCache = new Cache();

    private byte[] urlhash;
    private String line;
    private String error;
    private int errorCode;

    public TextSnippet(final byte[] urlhash, final String line, final int errorCode, final String errortext) {
        init(urlhash, line, errorCode, errortext);
    }

    /**
     * Compute a snippet for the given URL: first consult the snippet cache, then try to
     * build the snippet from the URL metadata, and only then load and parse the resource.
     */
    public TextSnippet(final LoaderDispatcher loader, final URIMetadataRow.Components comp, final HandleSet queryhashes, final CrawlProfile.CacheStrategy cacheStrategy, final boolean pre, final int snippetMaxLength, final int maxDocLen, final boolean reindexing) {
        // heise = "0OQUNU3JSs05"
        final DigestURI url = comp.url();
        if (queryhashes.isEmpty()) {
            //System.out.println("found no queryhashes for URL retrieve " + url);
            init(url.hash(), null, ERROR_NO_HASH_GIVEN, "no query hashes given");
            return;
        }

        // try to get snippet from snippetCache
        int source = SOURCE_CACHE;
        final String wordhashes = yacySearch.set2string(queryhashes);
        final String urls = new String(url.hash());
        String line = snippetsCache.get(wordhashes, urls);
        if (line != null) {
            // found the snippet
            init(url.hash(), line, source, null);
            return;
        }

        /* ===========================================================================
         * LOAD RESOURCE DATA
         * =========================================================================== */
        // if the snippet is not in the cache, we can try to get it from the htcache
        Response response;
        try {
            // first try to get the snippet from metadata
            String loc;
            boolean objectWasInCache = de.anomic.http.client.Cache.has(url);
            boolean useMetadata = !objectWasInCache && !cacheStrategy.mustBeOffline();
            if (useMetadata && containsAllHashes(loc = comp.dc_title(), queryhashes)) {
                // try to create the snippet from information given in the url itself
                init(url.hash(), loc, SOURCE_METADATA, null);
                return;
            } else if (useMetadata && containsAllHashes(loc = comp.dc_creator(), queryhashes)) {
                // try to create the snippet from information given in the creator metadata
                init(url.hash(), loc, SOURCE_METADATA, null);
                return;
            } else if (useMetadata && containsAllHashes(loc = comp.dc_subject(), queryhashes)) {
                // try to create the snippet from information given in the subject metadata
                init(url.hash(), loc, SOURCE_METADATA, null);
                return;
            } else if (useMetadata && containsAllHashes(loc = comp.url().toNormalform(true, true).replace('-', ' '), queryhashes)) {
                // try to create the snippet from information given in the url
                init(url.hash(), loc, SOURCE_METADATA, null);
                return;
            } else {
                // try to load the resource from the cache
                response = loader.load(loader.request(url, true, reindexing), cacheStrategy, Long.MAX_VALUE);
                if (response == null) {
                    // in case that we did not get any result, we can still return a success when we are not allowed to go online
                    if (cacheStrategy.mustBeOffline()) {
                        init(url.hash(), null, ERROR_SOURCE_LOADING, "omitted network load (not allowed), no cache entry");
                        return;
                    }
                    // if it is still not available, report an error
                    init(url.hash(), null, ERROR_RESOURCE_LOADING, "error loading resource from net, no cache entry");
                    return;
                }
                if (!objectWasInCache) {
                    // place entry on indexing queue
                    Switchboard.getSwitchboard().toIndexer(response);
                    source = SOURCE_WEB;
                }
            }
        } catch (final Exception e) {
            //Log.logException(e);
            init(url.hash(), null, ERROR_SOURCE_LOADING, "error loading resource: " + e.getMessage());
            return;
        }
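
        // at this point the resource was obtained successfully, either from the local
        // htcache or freshly loaded (in which case it was also queued for indexing above)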

        /* ===========================================================================
         * PARSE RESOURCE
         * =========================================================================== */
        Document document = null;
        try {
            document = Document.mergeDocuments(response.url(), response.getMimeType(), response.parse());
        } catch (final Parser.Failure e) {
            init(url.hash(), null, ERROR_PARSER_FAILED, e.getMessage()); // cannot be parsed
            return;
        }
        if (document == null) {
            init(url.hash(), null, ERROR_PARSER_FAILED, "parser error/failed"); // cannot be parsed
            return;
        }

        /* ===========================================================================
         * COMPUTE SNIPPET
         * =========================================================================== */
        // we have found a parseable non-empty file: use the lines

        // compute snippet from text
        final Collection<StringBuilder> sentences = document.getSentences(pre);
        if (sentences == null) {
            init(url.hash(), null, ERROR_PARSER_NO_LINES, "parser returned no sentences");
            return;
        }
        final SnippetExtractor tsr;
        String textline = null;
        HandleSet remainingHashes = queryhashes;
        try {
            tsr = new SnippetExtractor(sentences, queryhashes, snippetMaxLength);
            textline = tsr.getSnippet();
            remainingHashes = tsr.getRemainingWords();
        } catch (UnsupportedOperationException e) {
            init(url.hash(), null, ERROR_NO_MATCH, "no matching snippet found");
            return;
        }

        // compute snippet from media
        //String audioline = computeMediaSnippet(document.getAudiolinks(), queryhashes);
        //String videoline = computeMediaSnippet(document.getVideolinks(), queryhashes);
        //String appline = computeMediaSnippet(document.getApplinks(), queryhashes);
        //String hrefline = computeMediaSnippet(document.getAnchors(), queryhashes);
        //String imageline = computeMediaSnippet(document.getAudiolinks(), queryhashes);

        line = "";
        //if (audioline != null) line += (line.length() == 0) ? audioline : "<br />" + audioline;
        //if (videoline != null) line += (line.length() == 0) ? videoline : "<br />" + videoline;
        //if (appline != null) line += (line.length() == 0) ? appline : "<br />" + appline;
        //if (hrefline != null) line += (line.length() == 0) ? hrefline : "<br />" + hrefline;
        if (textline != null) line += (line.length() == 0) ? textline : "<br />" + textline;

        // the snippet is only accepted if some text was found and no query word hash remained unmatched
        if (line == null || !remainingHashes.isEmpty()) {
            init(url.hash(), null, ERROR_NO_MATCH, "no matching snippet found");
            return;
        }
        if (line.length() > snippetMaxLength) line = line.substring(0, snippetMaxLength);

        // finally store this snippet in our own cache
        snippetsCache.put(wordhashes, urls, line);

        document.close();
        init(url.hash(), line, source, null);
    }

    private void init(final byte[] urlhash, final String line, final int errorCode, final String errortext) {
        this.urlhash = urlhash;
        this.line = line;
        this.errorCode = errorCode;
        this.error = errortext;
    }

    public boolean exists() {
        return line != null;
    }

    public String getLineRaw() {
        return (line == null) ? "" : line;
    }

    public String getError() {
        return (error == null) ? "" : error.trim();
    }

    public int getErrorCode() {
        return errorCode;
    }

    public String getLineMarked(final HandleSet queryHashes) {
        if (line == null) return "";
        if (queryHashes == null || queryHashes.isEmpty()) return line.trim();
        if (line.endsWith(".")) line = line.substring(0, line.length() - 1);
        final Iterator<byte[]> i = queryHashes.iterator();
        byte[] h;
        final String[] w = line.split(" ");
        while (i.hasNext()) {
            h = i.next();
            for (int j = 0; j < w.length; j++) {
                final ArrayList<String> al = markedWordArrayList(w[j]); // mark special character separated words correctly if more than 1 word has to be marked
                w[j] = "";
                for (int k = 0; k < al.size(); k++) {
                    if (k % 2 == 0) {
                        // word has not been marked
                        w[j] += getWordMarked(al.get(k), h);
                    } else {
                        // word has been marked, do not encode again
                        w[j] += al.get(k);
                    }
                }
            }
        }
        final StringBuilder l = new StringBuilder(line.length() + queryHashes.size() * 8);
        for (int j = 0; j < w.length; j++) {
            l.append(w[j]);
            l.append(' ');
        }
        return l.toString().trim();
    }

    public int compareTo(TextSnippet o) {
        return Base64Order.enhancedCoder.compare(this.urlhash, o.urlhash);
    }

    public int compare(TextSnippet o1, TextSnippet o2) {
        return o1.compareTo(o2);
    }

    public int hashCode() {
        return ByteArray.hashCode(this.urlhash);
    }

    @Override
    public String toString() {
        return (line == null) ? "" : line;
    }

    /**
     * mark words with <b>-tags
     * @param word the word to mark
     * @param h the hash of the word to mark
     * @return the marked word if hash matches, else the unmarked word
     * @see #getLineMarked(HandleSet)
     */
    private static String getWordMarked(String word, byte[] h) {
        //ignore punctuation marks (contrib [MN])
        //note to myself:
        //For details on regex see "Mastering regular expressions" by J.E.F. Friedl
        //especially p. 123 and p. 390/391 (in the German version of the 2nd edition)

        String prefix = "";
        String postfix = "";
        int len = 0;

        // cut off prefix if it consists of non-letter or non-number characters
        while (p1.matcher(word).find()) {
            prefix = prefix + word.substring(0, 1);
            word = word.substring(1);
        }

        // cut off postfix if it consists of non-letter or non-number characters
        while (p2.matcher(word).find()) {
            len = word.length();
            postfix = word.substring(len - 1, len) + postfix;
            word = word.substring(0, len - 1);
        }

        //special treatment if there is a special character in the word
        if (p3.matcher(word).find()) {
            String out = "";
            String temp = "";
            for (int k = 0; k < word.length(); k++) {
                //is character a special character?
                if (p4.matcher(word.substring(k, k + 1)).find()) {
                    if (new String(Word.word2hash(temp)).equals(new String(h))) temp = "<b>" + CharacterCoding.unicode2html(temp, false) + "</b>";
                    out = out + temp + CharacterCoding.unicode2html(word.substring(k, k + 1), false);
                    temp = "";
                }
                //last character
                else if (k == (word.length() - 1)) {
                    temp = temp + word.substring(k, k + 1);
                    if (new String(Word.word2hash(temp)).equals(new String(h))) temp = "<b>" + CharacterCoding.unicode2html(temp, false) + "</b>";
                    out = out + temp;
                    temp = "";
                }
                else temp = temp + word.substring(k, k + 1);
            }
            word = out;
        }
        //end contrib [MN]
        else if (new String(Word.word2hash(word)).equals(new String(h))) word = "<b>" + CharacterCoding.unicode2html(word, false) + "</b>";

        word = CharacterCoding.unicode2html(prefix, false) + word + CharacterCoding.unicode2html(postfix, false);
        return word;
    }

    /**
     * words that have already been marked have index (i % 2 == 1),
     * words that have not yet been marked have index (i % 2 == 0)
     * @param string the String to be processed
     * @return the list of already marked and not yet marked words
     * @author [DW], 08.11.2008
     */
    private static ArrayList<String> markedWordArrayList(String string) {
        ArrayList<String> al = new java.util.ArrayList<String>(1);
        Matcher m = p01.matcher(string);
        while (m.find()) {
            al.add(m.group(1));
            al.add(m.group(2));
            string = m.group(3); // the postfix
            m = p01.matcher(string);
        }
        al.add(string);
        return al;
    }

    private static boolean containsAllHashes(final String sentence, final HandleSet queryhashes) {
        final TreeMap<byte[], Integer> m = Condenser.hashSentence(sentence);
        for (byte[] b: queryhashes) {
            if (!(m.containsKey(b))) return false;
        }
        return true;
    }

}