2007-08-06 02:56:56 +02:00
// plasmaSearchEvent.java
// (C) 2005 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 10.10.2005 on http://yacy.net
2005-10-10 02:32:15 +02:00
//
2007-08-06 02:56:56 +02:00
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
2005-10-10 02:32:15 +02:00
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.plasma ;
2007-08-28 14:15:46 +02:00
import java.io.UnsupportedEncodingException ;
2007-08-26 20:18:35 +02:00
import java.util.ArrayList ;
2007-08-28 14:15:46 +02:00
import java.util.Date ;
2007-08-25 01:12:59 +02:00
import java.util.HashMap ;
2005-10-11 09:06:33 +02:00
import java.util.Iterator ;
2006-09-11 00:36:47 +02:00
import java.util.Map ;
2007-09-07 13:45:38 +02:00
import java.util.Set ;
2006-09-11 12:39:25 +02:00
import java.util.TreeMap ;
2007-08-28 14:15:46 +02:00
import java.util.TreeSet ;
2005-10-12 14:28:49 +02:00
2006-09-30 00:27:20 +02:00
import de.anomic.index.indexContainer ;
2007-08-26 20:18:35 +02:00
import de.anomic.index.indexRWIEntry ;
2007-08-28 14:15:46 +02:00
import de.anomic.index.indexURLEntry ;
import de.anomic.kelondro.kelondroBitfield ;
2006-09-12 13:13:27 +02:00
import de.anomic.kelondro.kelondroMSetTools ;
2005-10-12 14:28:49 +02:00
import de.anomic.server.logging.serverLog ;
2006-09-16 02:07:09 +02:00
import de.anomic.yacy.yacyCore ;
2007-08-28 14:15:46 +02:00
import de.anomic.yacy.yacyDHTAction ;
2005-10-13 15:57:15 +02:00
import de.anomic.yacy.yacySearch ;
2007-08-28 14:15:46 +02:00
import de.anomic.yacy.yacySeed ;
2007-09-05 11:01:35 +02:00
import de.anomic.yacy.yacyURL ;
2005-10-10 02:32:15 +02:00
2007-08-15 13:36:59 +02:00
public final class plasmaSearchEvent {
// --- configuration and per-event state ---

public static int workerThreadCount = 10;               // number of snippet-fetching worker threads per event
public static String lastEventID = "";                  // id of the most recently created or refreshed event
private static HashMap lastEvents = new HashMap();      // a cache for objects from this class: re-use old search requests
public static final long eventLifetime = 600000;        // the time an event will stay in the cache, 10 Minutes
private static final int max_results_preparation = 200; // upper bound of pre-ranked results kept in the ranking cache

private long eventTime;                                 // creation/refresh time; compared against eventLifetime for expiry
private plasmaSearchQuery query;
private plasmaSearchRankingProfile ranking;
private plasmaWordIndex wordIndex;
private plasmaSearchRankingProcess rankedCache;         // ordered search results, grows dynamically as all the query threads enrich this container
private Map rcAbstracts;                                // cache for index abstracts; word:TreeMap mapping where the embedded TreeMap is a urlhash:peerlist relation
private plasmaSearchProcessing process;
private yacySearch[] primarySearchThreads, secondarySearchThreads;
private Thread localSearchThread;
private TreeMap preselectedPeerHashes;
//private Object[] references;
public TreeMap IAResults, IACount;                      // index-abstract result strings and entry counts, keyed by word hash
public String IAmaxcounthash, IAneardhthash;            // word hash with most entries / word hash closest to our DHT position
private int localcount;                                 // number of results contributed by the local index
private resultWorker[] workerThreads;
private ArrayList resultList;                           // list of this.Entry objects
//private int resultListLock; // a pointer that shows that all elements below this pointer are fixed and may not be changed again
private HashMap failedURLs;                             // a mapping from a urlhash to a fail reason string
TreeSet snippetFetchWordHashes;                         // a set of word hashes that are used to match with the snippets
private long urlRetrievalAllTime;                       // accumulated time spent loading URL entries from the LURL-db
private long snippetComputationAllTime;                 // accumulated time spent computing snippets
private plasmaSearchEvent ( plasmaSearchQuery query ,
2006-02-05 00:51:00 +01:00
plasmaSearchRankingProfile ranking ,
2007-08-06 01:57:25 +02:00
plasmaSearchProcessing localTiming ,
2006-02-05 00:51:00 +01:00
plasmaWordIndex wordIndex ,
2007-08-28 14:15:46 +02:00
TreeMap preselectedPeerHashes ,
boolean generateAbstracts ,
TreeSet abstractSet ) {
2007-08-25 01:12:59 +02:00
this . eventTime = System . currentTimeMillis ( ) ; // for lifetime check
2005-10-12 14:28:49 +02:00
this . wordIndex = wordIndex ;
2005-10-11 09:06:33 +02:00
this . query = query ;
2006-02-05 00:51:00 +01:00
this . ranking = ranking ;
2007-04-10 14:27:03 +02:00
this . rcAbstracts = ( query . queryHashes . size ( ) > 1 ) ? new TreeMap ( ) : null ; // generate abstracts only for combined searches
2007-09-04 01:43:55 +02:00
this . process = localTiming ;
2006-09-13 19:13:28 +02:00
this . primarySearchThreads = null ;
this . secondarySearchThreads = null ;
2007-04-26 11:51:51 +02:00
this . preselectedPeerHashes = preselectedPeerHashes ;
2007-08-28 14:15:46 +02:00
this . IAResults = new TreeMap ( ) ;
this . IACount = new TreeMap ( ) ;
this . IAmaxcounthash = null ;
this . IAneardhthash = null ;
2007-09-04 01:43:55 +02:00
this . localcount = 0 ;
2007-09-26 12:11:50 +02:00
this . urlRetrievalAllTime = 0 ;
this . snippetComputationAllTime = 0 ;
2007-09-04 01:43:55 +02:00
this . workerThreads = null ;
this . resultList = new ArrayList ( 10 ) ; // this is the result set which is filled up with search results, enriched with snippets
2007-09-08 13:50:19 +02:00
//this.resultListLock = 0; // no locked elements until now
2007-09-04 01:43:55 +02:00
this . failedURLs = new HashMap ( ) ; // a map of urls to reason strings where a worker thread tried to work on, but failed.
// snippets do not need to match with the complete query hashes,
// only with the query minus the stopwords which had not been used for the search
final TreeSet filtered = kelondroMSetTools . joinConstructive ( query . queryHashes , plasmaSwitchboard . stopwords ) ;
this . snippetFetchWordHashes = ( TreeSet ) query . queryHashes . clone ( ) ;
if ( ( filtered ! = null ) & & ( filtered . size ( ) > 0 ) ) {
kelondroMSetTools . excludeDestructive ( this . snippetFetchWordHashes , plasmaSwitchboard . stopwords ) ;
}
2007-08-25 01:12:59 +02:00
long start = System . currentTimeMillis ( ) ;
if ( ( query . domType = = plasmaSearchQuery . SEARCHDOM_GLOBALDHT ) | |
( query . domType = = plasmaSearchQuery . SEARCHDOM_CLUSTERALL ) ) {
2007-11-07 23:38:09 +01:00
this . rankedCache = new plasmaSearchRankingProcess ( query , process , ranking , max_results_preparation ) ;
2007-09-04 01:43:55 +02:00
2007-08-25 01:12:59 +02:00
int fetchpeers = ( int ) ( query . maximumTime / 500L ) ; // number of target peers; means 10 peers in 10 seconds
if ( fetchpeers > 50 ) fetchpeers = 50 ;
if ( fetchpeers < 30 ) fetchpeers = 30 ;
// do a global search
// the result of the fetch is then in the rcGlobal
2007-09-04 01:43:55 +02:00
process . startTimer ( ) ;
serverLog . logFine ( " SEARCH_EVENT " , " STARTING " + fetchpeers + " THREADS TO CATCH EACH " + query . displayResults ( ) + " URLs " ) ;
2007-09-08 13:50:19 +02:00
this . primarySearchThreads = yacySearch . primaryRemoteSearches (
2007-08-25 01:12:59 +02:00
plasmaSearchQuery . hashSet2hashString ( query . queryHashes ) ,
plasmaSearchQuery . hashSet2hashString ( query . excludeHashes ) ,
" " ,
query . prefer ,
query . urlMask ,
2007-09-04 01:43:55 +02:00
query . displayResults ( ) ,
2007-08-25 01:12:59 +02:00
query . maxDistance ,
wordIndex ,
2007-09-04 01:43:55 +02:00
rankedCache ,
2007-08-25 01:12:59 +02:00
rcAbstracts ,
fetchpeers ,
plasmaSwitchboard . urlBlacklist ,
ranking ,
query . constraint ,
( query . domType = = plasmaSearchQuery . SEARCHDOM_GLOBALDHT ) ? null : preselectedPeerHashes ) ;
2007-09-08 13:50:19 +02:00
process . yield ( " remote search thread start " , this . primarySearchThreads . length ) ;
2007-09-04 01:43:55 +02:00
2007-08-25 01:12:59 +02:00
// meanwhile do a local search
2007-09-28 03:36:22 +02:00
localSearchThread = new localSearchProcess ( ) ;
localSearchThread . start ( ) ;
2007-09-04 01:43:55 +02:00
2007-08-25 01:12:59 +02:00
// finished searching
serverLog . logFine ( " SEARCH_EVENT " , " SEARCH TIME AFTER GLOBAL-TRIGGER TO " + primarySearchThreads . length + " PEERS: " + ( ( System . currentTimeMillis ( ) - start ) / 1000 ) + " seconds " ) ;
} else {
2007-09-04 01:43:55 +02:00
Map [ ] searchContainerMaps = process . localSearchContainers ( query , wordIndex , null ) ;
2007-08-25 01:12:59 +02:00
2007-08-28 14:15:46 +02:00
if ( generateAbstracts ) {
// compute index abstracts
2007-09-04 01:43:55 +02:00
process . startTimer ( ) ;
2007-08-28 14:15:46 +02:00
Iterator ci = searchContainerMaps [ 0 ] . entrySet ( ) . iterator ( ) ;
Map . Entry entry ;
int maxcount = - 1 ;
double mindhtdistance = 1 . 1 , d ;
String wordhash ;
while ( ci . hasNext ( ) ) {
entry = ( Map . Entry ) ci . next ( ) ;
wordhash = ( String ) entry . getKey ( ) ;
indexContainer container = ( indexContainer ) entry . getValue ( ) ;
assert ( container . getWordHash ( ) . equals ( wordhash ) ) ;
if ( container . size ( ) > maxcount ) {
IAmaxcounthash = wordhash ;
maxcount = container . size ( ) ;
}
2007-10-01 14:30:23 +02:00
d = yacyDHTAction . dhtDistance ( yacyCore . seedDB . mySeed ( ) . hash , wordhash ) ;
2007-08-28 14:15:46 +02:00
if ( d < mindhtdistance ) {
// calculate the word hash that is closest to our dht position
mindhtdistance = d ;
IAneardhthash = wordhash ;
}
IACount . put ( wordhash , new Integer ( container . size ( ) ) ) ;
2007-09-05 11:01:35 +02:00
IAResults . put ( wordhash , plasmaSearchProcessing . compressIndex ( container , null , 1000 ) . toString ( ) ) ;
2007-08-28 14:15:46 +02:00
}
2007-09-04 01:43:55 +02:00
process . yield ( " abstract generation " , searchContainerMaps [ 0 ] . size ( ) ) ;
2007-08-28 14:15:46 +02:00
}
2007-09-04 01:43:55 +02:00
indexContainer rcLocal =
2007-08-25 01:12:59 +02:00
( searchContainerMaps = = null ) ?
plasmaWordIndex . emptyContainer ( null , 0 ) :
2007-09-04 01:43:55 +02:00
process . localSearchJoinExclude (
2007-08-25 01:12:59 +02:00
searchContainerMaps [ 0 ] . values ( ) ,
searchContainerMaps [ 1 ] . values ( ) ,
query . maxDistance ) ;
this . localcount = rcLocal . size ( ) ;
2007-11-07 23:38:09 +01:00
this . rankedCache = new plasmaSearchRankingProcess ( query , process , ranking , max_results_preparation ) ;
this . rankedCache . insert ( rcLocal , true ) ;
2007-08-25 01:12:59 +02:00
}
2007-09-05 11:01:35 +02:00
2007-09-04 01:43:55 +02:00
if ( query . onlineSnippetFetch ) {
// start worker threads to fetch urls and snippets
this . workerThreads = new resultWorker [ workerThreadCount ] ;
for ( int i = 0 ; i < workerThreadCount ; i + + ) {
this . workerThreads [ i ] = new resultWorker ( i , process . getTargetTime ( ) * 3 ) ;
this . workerThreads [ i ] . start ( ) ;
}
} else {
// prepare result vector directly without worker threads
2007-09-05 11:01:35 +02:00
process . startTimer ( ) ;
2007-11-07 23:38:09 +01:00
indexRWIEntry entry ;
indexURLEntry page ;
ResultEntry resultEntry ;
synchronized ( rankedCache ) {
Iterator indexRWIEntryIterator = rankedCache . entries ( ) ;
while ( ( indexRWIEntryIterator . hasNext ( ) ) & & ( resultList . size ( ) < ( query . neededResults ( ) ) ) ) {
// fetch next entry
entry = ( indexRWIEntry ) indexRWIEntryIterator . next ( ) ;
page = wordIndex . loadedURL . load ( entry . urlHash ( ) , entry ) ;
2007-09-07 13:45:38 +02:00
2007-11-07 23:38:09 +01:00
if ( page = = null ) {
registerFailure ( entry . urlHash ( ) , " url does not exist in lurl-db " ) ;
continue ;
}
2007-09-07 13:45:38 +02:00
2007-11-07 23:38:09 +01:00
resultEntry = obtainResultEntry ( page , ( snippetComputationAllTime < 300 ) ? 1 : 0 ) ;
if ( resultEntry = = null ) continue ; // the entry had some problems, cannot be used
urlRetrievalAllTime + = resultEntry . dbRetrievalTime ;
snippetComputationAllTime + = resultEntry . snippetComputationTime ;
2007-09-04 01:43:55 +02:00
2007-11-07 23:38:09 +01:00
// place the result to the result vector
synchronized ( resultList ) {
resultList . add ( resultEntry ) ;
}
2007-09-04 01:43:55 +02:00
2007-11-07 23:38:09 +01:00
// add references
synchronized ( rankedCache ) {
rankedCache . addReferences ( resultEntry ) ;
}
2007-09-04 01:43:55 +02:00
}
}
2007-09-05 11:01:35 +02:00
process . yield ( " offline snippet fetch " , resultList . size ( ) ) ;
2007-09-04 01:43:55 +02:00
}
2007-08-25 01:12:59 +02:00
2007-09-07 13:45:38 +02:00
// clean up events
cleanupEvents ( ) ;
2007-08-25 01:12:59 +02:00
// store this search to a cache so it can be re-used
lastEvents . put ( query . id ( ) , this ) ;
2007-08-26 20:18:35 +02:00
lastEventID = query . id ( ) ;
2005-10-12 14:28:49 +02:00
}
2007-09-28 03:36:22 +02:00
private class localSearchProcess extends Thread {
public localSearchProcess ( ) {
}
public void run ( ) {
// do a local search
Map [ ] searchContainerMaps = process . localSearchContainers ( query , wordIndex , null ) ;
// use the search containers to fill up rcAbstracts locally
/ *
if ( ( rcAbstracts ! = null ) & & ( searchContainerMap ! = null ) ) {
Iterator i , ci = searchContainerMap . entrySet ( ) . iterator ( ) ;
Map . Entry entry ;
String wordhash ;
indexContainer container ;
TreeMap singleAbstract ;
String mypeerhash = yacyCore . seedDB . mySeed . hash ;
while ( ci . hasNext ( ) ) {
entry = ( Map . Entry ) ci . next ( ) ;
wordhash = ( String ) entry . getKey ( ) ;
container = ( indexContainer ) entry . getValue ( ) ;
// collect all urlhashes from the container
synchronized ( rcAbstracts ) {
singleAbstract = ( TreeMap ) rcAbstracts . get ( wordhash ) ; // a mapping from url-hashes to a string of peer-hashes
if ( singleAbstract = = null ) singleAbstract = new TreeMap ( ) ;
i = container . entries ( ) ;
while ( i . hasNext ( ) ) singleAbstract . put ( ( ( indexEntry ) i . next ( ) ) . urlHash ( ) , mypeerhash ) ;
rcAbstracts . put ( wordhash , singleAbstract ) ;
}
}
}
* /
// join and exlcude the local result
indexContainer rcLocal =
( searchContainerMaps = = null ) ?
plasmaWordIndex . emptyContainer ( null , 0 ) :
process . localSearchJoinExclude (
searchContainerMaps [ 0 ] . values ( ) ,
searchContainerMaps [ 1 ] . values ( ) ,
query . maxDistance ) ;
localcount = rcLocal . size ( ) ;
// sort the local containers and truncate it to a limited count,
// so following sortings together with the global results will be fast
synchronized ( rankedCache ) {
2007-11-07 23:38:09 +01:00
rankedCache . insert ( rcLocal , true ) ;
2007-09-28 03:36:22 +02:00
}
}
}
2007-09-04 01:43:55 +02:00
2007-09-07 13:45:38 +02:00
private static void cleanupEvents ( ) {
// remove old events in the event cache
Iterator i = lastEvents . entrySet ( ) . iterator ( ) ;
plasmaSearchEvent cleanEvent ;
while ( i . hasNext ( ) ) {
cleanEvent = ( plasmaSearchEvent ) ( ( Map . Entry ) i . next ( ) ) . getValue ( ) ;
if ( cleanEvent . eventTime + eventLifetime < System . currentTimeMillis ( ) ) {
// execute deletion of failed words
Set removeWords = cleanEvent . query . queryHashes ;
removeWords . addAll ( cleanEvent . query . excludeHashes ) ;
cleanEvent . wordIndex . removeEntriesMultiple ( removeWords , cleanEvent . failedURLs . keySet ( ) ) ;
serverLog . logInfo ( " SearchEvents " , " cleaning up event " + cleanEvent . query . id ( ) + " , removed " + cleanEvent . failedURLs . size ( ) + " URL references on " + removeWords . size ( ) + " words " ) ;
// remove the event
i . remove ( ) ;
}
}
}
2007-10-03 18:42:11 +02:00
private ResultEntry obtainResultEntry ( indexURLEntry page , int snippetFetchMode ) {
2007-09-04 01:43:55 +02:00
// a search result entry needs some work to produce a result Entry:
// - check if url entry exists in LURL-db
// - check exclusions, constraints, masks, media-domains
// - load snippet (see if page exists) and check if snippet contains searched word
2007-10-03 18:42:11 +02:00
// Snippet Fetching can has 3 modes:
// 0 - do not fetch snippets
// 1 - fetch snippets offline only
// 2 - online snippet fetch
2007-09-04 01:43:55 +02:00
// load only urls if there was not yet a root url of that hash
// find the url entry
2007-09-05 11:01:35 +02:00
2007-09-26 12:11:50 +02:00
long startTime = System . currentTimeMillis ( ) ;
2007-09-04 01:43:55 +02:00
indexURLEntry . Components comp = page . comp ( ) ;
String pagetitle = comp . title ( ) . toLowerCase ( ) ;
if ( comp . url ( ) = = null ) {
2007-09-07 13:45:38 +02:00
registerFailure ( page . hash ( ) , " url corrupted (null) " ) ;
2007-09-04 01:43:55 +02:00
return null ; // rare case where the url is corrupted
}
String pageurl = comp . url ( ) . toString ( ) . toLowerCase ( ) ;
String pageauthor = comp . author ( ) . toLowerCase ( ) ;
2007-09-26 12:11:50 +02:00
long dbRetrievalTime = System . currentTimeMillis ( ) - startTime ;
2007-09-04 01:43:55 +02:00
// check exclusion
2007-09-07 13:45:38 +02:00
if ( ( plasmaSearchQuery . matches ( pagetitle , query . excludeHashes ) ) | |
( plasmaSearchQuery . matches ( pageurl , query . excludeHashes ) ) | |
( plasmaSearchQuery . matches ( pageauthor , query . excludeHashes ) ) ) {
2007-09-04 01:43:55 +02:00
return null ;
}
// check url mask
if ( ! ( pageurl . matches ( query . urlMask ) ) ) {
return null ;
}
// check constraints
if ( ( ! ( query . constraint . equals ( plasmaSearchQuery . catchall_constraint ) ) ) & &
( query . constraint . get ( plasmaCondenser . flag_cat_indexof ) ) & &
( ! ( comp . title ( ) . startsWith ( " Index of " ) ) ) ) {
final Iterator wi = query . queryHashes . iterator ( ) ;
while ( wi . hasNext ( ) ) wordIndex . removeEntry ( ( String ) wi . next ( ) , page . hash ( ) ) ;
2007-10-03 16:34:05 +02:00
registerFailure ( page . hash ( ) , " index-of constraint not fullfilled " ) ;
2007-09-04 01:43:55 +02:00
return null ;
}
if ( ( query . contentdom = = plasmaSearchQuery . CONTENTDOM_AUDIO ) & & ( page . laudio ( ) = = 0 ) ) {
2007-10-03 16:34:05 +02:00
registerFailure ( page . hash ( ) , " contentdom-audio constraint not fullfilled " ) ;
2007-09-04 01:43:55 +02:00
return null ;
}
if ( ( query . contentdom = = plasmaSearchQuery . CONTENTDOM_VIDEO ) & & ( page . lvideo ( ) = = 0 ) ) {
2007-10-03 16:34:05 +02:00
registerFailure ( page . hash ( ) , " contentdom-video constraint not fullfilled " ) ;
2007-09-04 01:43:55 +02:00
return null ;
}
if ( ( query . contentdom = = plasmaSearchQuery . CONTENTDOM_IMAGE ) & & ( page . limage ( ) = = 0 ) ) {
2007-10-03 16:34:05 +02:00
registerFailure ( page . hash ( ) , " contentdom-image constraint not fullfilled " ) ;
2007-09-04 01:43:55 +02:00
return null ;
}
if ( ( query . contentdom = = plasmaSearchQuery . CONTENTDOM_APP ) & & ( page . lapp ( ) = = 0 ) ) {
2007-10-03 16:34:05 +02:00
registerFailure ( page . hash ( ) , " contentdom-app constraint not fullfilled " ) ;
2007-09-04 01:43:55 +02:00
return null ;
}
2007-10-03 18:42:11 +02:00
if ( snippetFetchMode = = 0 ) {
return new ResultEntry ( page , wordIndex , null , null , dbRetrievalTime , 0 ) ; // result without snippet
}
2007-09-04 01:43:55 +02:00
// load snippet
if ( query . contentdom = = plasmaSearchQuery . CONTENTDOM_TEXT ) {
// attach text snippet
2007-09-26 12:11:50 +02:00
startTime = System . currentTimeMillis ( ) ;
2007-10-03 18:42:11 +02:00
plasmaSnippetCache . TextSnippet snippet = plasmaSnippetCache . retrieveTextSnippet ( comp . url ( ) , snippetFetchWordHashes , ( snippetFetchMode = = 2 ) , query . constraint . get ( plasmaCondenser . flag_cat_indexof ) , 180 , 3000 , ( snippetFetchMode = = 2 ) ? Integer . MAX_VALUE : 100000 ) ;
2007-09-26 12:11:50 +02:00
long snippetComputationTime = System . currentTimeMillis ( ) - startTime ;
2007-10-03 17:45:12 +02:00
serverLog . logInfo ( " SEARCH_EVENT " , " text snippet load time for " + comp . url ( ) + " : " + snippetComputationTime + " , " + ( ( snippet . getErrorCode ( ) < 11 ) ? " snippet found " : ( " no snippet found ( " + snippet . getError ( ) + " ) " ) ) ) ;
2007-09-26 12:11:50 +02:00
2007-09-04 01:43:55 +02:00
if ( snippet . getErrorCode ( ) < 11 ) {
// we loaded the file and found the snippet
2007-09-26 12:11:50 +02:00
return new ResultEntry ( page , wordIndex , snippet , null , dbRetrievalTime , snippetComputationTime ) ; // result with snippet attached
2007-10-03 18:42:11 +02:00
} else if ( snippetFetchMode = = 1 ) {
2007-09-04 01:43:55 +02:00
// we did not demand online loading, therefore a failure does not mean that the missing snippet causes a rejection of this result
// this may happen during a remote search, because snippet loading is omitted to retrieve results faster
2007-09-26 12:11:50 +02:00
return new ResultEntry ( page , wordIndex , null , null , dbRetrievalTime , snippetComputationTime ) ; // result without snippet
2007-09-04 01:43:55 +02:00
} else {
// problems with snippet fetch
2007-09-07 13:45:38 +02:00
registerFailure ( page . hash ( ) , " no text snippet for URL " + comp . url ( ) ) ;
2007-09-04 01:43:55 +02:00
plasmaSnippetCache . failConsequences ( snippet , query . id ( ) ) ;
return null ;
}
} else {
// attach media information
2007-09-26 12:11:50 +02:00
startTime = System . currentTimeMillis ( ) ;
2007-10-03 18:42:11 +02:00
ArrayList mediaSnippets = plasmaSnippetCache . retrieveMediaSnippets ( comp . url ( ) , snippetFetchWordHashes , query . contentdom , ( snippetFetchMode = = 2 ) , 6000 ) ;
2007-09-26 12:11:50 +02:00
long snippetComputationTime = System . currentTimeMillis ( ) - startTime ;
2007-10-03 16:34:05 +02:00
serverLog . logInfo ( " SEARCH_EVENT " , " media snippet load time for " + comp . url ( ) + " : " + snippetComputationTime ) ;
2007-09-26 12:11:50 +02:00
2007-09-04 01:43:55 +02:00
if ( ( mediaSnippets ! = null ) & & ( mediaSnippets . size ( ) > 0 ) ) {
// found media snippets, return entry
2007-09-26 12:11:50 +02:00
return new ResultEntry ( page , wordIndex , null , mediaSnippets , dbRetrievalTime , snippetComputationTime ) ;
2007-10-03 18:42:11 +02:00
} else if ( snippetFetchMode = = 1 ) {
2007-09-26 12:11:50 +02:00
return new ResultEntry ( page , wordIndex , null , null , dbRetrievalTime , snippetComputationTime ) ;
2007-09-04 01:43:55 +02:00
} else {
// problems with snippet fetch
2007-09-07 13:45:38 +02:00
registerFailure ( page . hash ( ) , " no media snippet for URL " + comp . url ( ) ) ;
2007-09-04 01:43:55 +02:00
return null ;
}
}
// finished, no more actions possible here
}
private boolean anyWorkerAlive ( ) {
if ( this . workerThreads = = null ) return false ;
for ( int i = 0 ; i < workerThreadCount ; i + + ) {
if ( ( this . workerThreads [ i ] ! = null ) & & ( this . workerThreads [ i ] . isAlive ( ) ) ) return true ;
}
return false ;
}
2005-10-12 14:28:49 +02:00
2007-09-08 13:50:19 +02:00
private boolean anyRemoteSearchAlive ( ) {
// check primary search threads
if ( ( this . primarySearchThreads ! = null ) & & ( this . primarySearchThreads . length ! = 0 ) ) {
for ( int i = 0 ; i < this . primarySearchThreads . length ; i + + ) {
if ( ( this . primarySearchThreads [ i ] ! = null ) & & ( this . primarySearchThreads [ i ] . isAlive ( ) ) ) return true ;
}
}
// maybe a secondary search thread is alivem check this
if ( ( this . secondarySearchThreads ! = null ) & & ( this . secondarySearchThreads . length ! = 0 ) ) {
2007-10-04 10:50:33 +02:00
for ( int i = 0 ; i < this . secondarySearchThreads . length ; i + + ) {
2007-09-08 13:50:19 +02:00
if ( ( this . secondarySearchThreads [ i ] ! = null ) & & ( this . secondarySearchThreads [ i ] . isAlive ( ) ) ) return true ;
}
}
return false ;
}
2005-10-24 02:34:15 +02:00
public plasmaSearchQuery getQuery ( ) {
return query ;
}
2007-08-24 10:41:52 +02:00
public plasmaSearchRankingProfile getRanking ( ) {
return ranking ;
}
2007-09-04 01:43:55 +02:00
public plasmaSearchProcessing getProcess ( ) {
return process ;
2005-10-27 02:42:08 +02:00
}
2006-09-13 19:13:28 +02:00
public yacySearch [ ] getPrimarySearchThreads ( ) {
return primarySearchThreads ;
}
2007-11-07 23:38:09 +01:00
2006-09-13 19:13:28 +02:00
public yacySearch [ ] getSecondarySearchThreads ( ) {
return secondarySearchThreads ;
2005-10-24 02:34:15 +02:00
}
2007-08-24 10:41:52 +02:00
public int getLocalCount ( ) {
return this . localcount ;
2007-08-16 13:44:18 +02:00
}
2007-08-24 10:41:52 +02:00
public int getGlobalCount ( ) {
2007-09-04 01:43:55 +02:00
return this . rankedCache . getGlobalCount ( ) ;
2007-01-15 02:50:57 +01:00
}
2007-09-26 12:11:50 +02:00
/** Returns the accumulated time (ms) spent loading URL entries from the LURL-db. */
public long getURLRetrievalTime() {
    return this.urlRetrievalAllTime;
}
public long getSnippetComputationTime ( ) {
return this . snippetComputationAllTime ;
}
2007-08-16 13:44:18 +02:00
2007-08-26 20:18:35 +02:00
public static plasmaSearchEvent getEvent ( String eventID ) {
2007-09-06 03:28:35 +02:00
synchronized ( lastEvents ) {
return ( plasmaSearchEvent ) lastEvents . get ( eventID ) ;
}
2007-08-26 20:18:35 +02:00
}
2007-08-25 01:12:59 +02:00
public static plasmaSearchEvent getEvent ( plasmaSearchQuery query ,
plasmaSearchRankingProfile ranking ,
plasmaSearchProcessing localTiming ,
plasmaWordIndex wordIndex ,
2007-08-28 14:15:46 +02:00
TreeMap preselectedPeerHashes ,
boolean generateAbstracts ,
TreeSet abstractSet ) {
2007-09-06 03:28:35 +02:00
synchronized ( lastEvents ) {
plasmaSearchEvent event = ( plasmaSearchEvent ) lastEvents . get ( query . id ( ) ) ;
if ( event = = null ) {
event = new plasmaSearchEvent ( query , ranking , localTiming , wordIndex , preselectedPeerHashes , generateAbstracts , abstractSet ) ;
} else {
//re-new the event time for this event, so it is not deleted next time too early
event . eventTime = System . currentTimeMillis ( ) ;
// replace the query, because this contains the current result offset
event . query = query ;
}
2007-09-04 01:43:55 +02:00
2007-09-06 03:28:35 +02:00
// if worker threads had been alive, but did not succeed, start them again to fetch missing links
if ( ( query . onlineSnippetFetch ) & &
( ! event . anyWorkerAlive ( ) ) & &
2007-09-08 13:50:19 +02:00
( event . resultList . size ( ) < query . neededResults ( ) + 10 ) & &
2007-09-06 03:28:35 +02:00
( ( event . getLocalCount ( ) + event . getGlobalCount ( ) ) > event . resultList . size ( ) ) ) {
// set new timeout
event . eventTime = System . currentTimeMillis ( ) ;
// start worker threads to fetch urls and snippets
event . workerThreads = new resultWorker [ workerThreadCount ] ;
for ( int i = 0 ; i < workerThreadCount ; i + + ) {
event . workerThreads [ i ] = event . deployWorker ( i , 3 * event . process . getTargetTime ( ) ) ;
}
2007-08-25 01:12:59 +02:00
}
2007-09-06 03:28:35 +02:00
return event ;
2007-08-25 01:12:59 +02:00
}
2007-09-04 01:43:55 +02:00
}
private resultWorker deployWorker ( int id , long lifetime ) {
resultWorker worker = new resultWorker ( id , lifetime ) ;
worker . start ( ) ;
return worker ;
2006-04-03 17:36:53 +02:00
}
2007-08-28 14:15:46 +02:00
2007-09-04 01:43:55 +02:00
private class resultWorker extends Thread {
2007-08-28 14:15:46 +02:00
2007-09-04 01:43:55 +02:00
private indexRWIEntry entry ; // entry this thread is working on
private long timeout ; // the date until this thread should try to work
private long sleeptime ; // the sleeptime of this thread at the beginning of its life
private int id ;
2007-08-28 14:15:46 +02:00
2007-09-04 01:43:55 +02:00
public resultWorker ( int id , long lifetime ) {
this . id = id ;
this . timeout = System . currentTimeMillis ( ) + lifetime ;
this . sleeptime = lifetime / 10 * id ;
this . entry = null ;
}
public void run ( ) {
// sleep first to give remote loading threads a chance to fetch entries
2007-09-08 13:50:19 +02:00
if ( anyRemoteSearchAlive ( ) ) try { Thread . sleep ( this . sleeptime ) ; } catch ( InterruptedException e1 ) { }
2007-09-04 01:43:55 +02:00
// start fetching urls and snippets
2007-09-08 13:50:19 +02:00
while ( true ) {
if ( resultList . size ( ) > query . neededResults ( ) + query . displayResults ( ) ) break ; // computed enough
if ( System . currentTimeMillis ( ) > this . timeout ) break ; // time is over
2007-09-04 01:43:55 +02:00
// try secondary search
2007-09-07 13:45:38 +02:00
prepareSecondarySearch ( ) ; // will be executed only once
2007-09-04 01:43:55 +02:00
// fetch next entry to work on
this . entry = null ;
entry = nextOrder ( ) ;
if ( entry = = null ) {
2007-09-08 13:50:19 +02:00
if ( anyRemoteSearchAlive ( ) ) {
// wait and try again
try { Thread . sleep ( 100 ) ; } catch ( InterruptedException e ) { }
continue ;
} else {
2007-11-07 23:38:09 +01:00
// we will not see that there come more results in
2007-09-08 13:50:19 +02:00
break ;
}
2007-09-04 01:43:55 +02:00
}
2007-09-07 13:45:38 +02:00
indexURLEntry page = wordIndex . loadedURL . load ( entry . urlHash ( ) , entry ) ;
if ( page = = null ) {
registerFailure ( entry . urlHash ( ) , " url does not exist in lurl-db " ) ;
continue ;
}
2007-10-03 18:42:11 +02:00
ResultEntry resultEntry = obtainResultEntry ( page , 2 ) ;
2007-09-04 01:43:55 +02:00
if ( resultEntry = = null ) continue ; // the entry had some problems, cannot be used
2007-09-26 12:11:50 +02:00
urlRetrievalAllTime + = resultEntry . dbRetrievalTime ;
snippetComputationAllTime + = resultEntry . snippetComputationTime ;
2007-09-04 01:43:55 +02:00
// place the result to the result vector
synchronized ( resultList ) {
resultList . add ( resultEntry ) ;
}
// add references
synchronized ( rankedCache ) {
rankedCache . addReferences ( resultEntry ) ;
}
System . out . println ( " DEBUG SNIPPET_LOADING: thread " + id + " got " + resultEntry . url ( ) ) ;
}
2007-09-08 13:50:19 +02:00
serverLog . logInfo ( " SEARCH " , " resultWorker thread " + id + " terminated " ) ;
2007-09-04 01:43:55 +02:00
}
2007-08-28 14:15:46 +02:00
2007-09-04 01:43:55 +02:00
private indexRWIEntry nextOrder ( ) {
synchronized ( rankedCache ) {
2007-11-07 23:38:09 +01:00
Iterator i = rankedCache . entries ( ) ;
indexRWIEntry entry ;
2007-09-04 01:43:55 +02:00
String urlhash ;
2007-11-07 23:38:09 +01:00
while ( i . hasNext ( ) ) {
entry = ( indexRWIEntry ) i . next ( ) ;
urlhash = entry . urlHash ( ) ;
2007-09-04 01:43:55 +02:00
if ( ( anyFailureWith ( urlhash ) ) | | ( anyWorkerWith ( urlhash ) ) | | ( anyResultWith ( urlhash ) ) ) continue ;
2007-11-07 23:38:09 +01:00
return entry ;
2007-08-28 14:15:46 +02:00
}
}
2007-09-04 01:43:55 +02:00
return null ; // no more entries available
2007-08-28 14:15:46 +02:00
}
2007-09-04 01:43:55 +02:00
private boolean anyWorkerWith ( String urlhash ) {
for ( int i = 0 ; i < workerThreadCount ; i + + ) {
if ( ( workerThreads [ i ] = = null ) | | ( workerThreads [ i ] = = this ) ) continue ;
if ( ( workerThreads [ i ] . entry ! = null ) & & ( workerThreads [ i ] . entry . urlHash ( ) . equals ( urlhash ) ) ) return true ;
}
return false ;
}
2007-08-28 14:15:46 +02:00
2007-09-04 01:43:55 +02:00
private boolean anyResultWith ( String urlhash ) {
for ( int i = 0 ; i < resultList . size ( ) ; i + + ) {
if ( ( ( ResultEntry ) resultList . get ( i ) ) . urlentry . hash ( ) . equals ( urlhash ) ) return true ;
}
return false ;
}
2007-08-28 14:15:46 +02:00
2007-09-04 01:43:55 +02:00
private boolean anyFailureWith ( String urlhash ) {
return ( failedURLs . get ( urlhash ) ! = null ) ;
}
}
/** Records a url hash as failed with a reason; failed hashes are skipped and later purged from the index. */
private void registerFailure(String urlhash, String reason) {
    this.failedURLs.put(urlhash, reason);
    serverLog.logInfo("search", "sorted out hash " + urlhash + " during search: " + reason);
}
/**
 * Delivers the result entry for the given display position, waiting if
 * necessary until the worker threads have accumulated enough results.
 * Before returning, a post-ranking pass may swap a better entry into the
 * requested position.
 *
 * @param item the zero-based result position requested
 * @return the (possibly post-ranked) entry at that position, or null if
 *         no entry became available
 */
public ResultEntry oneResult(int item) {
    // first sleep a while to give accumulation threads a chance to work;
    // the budget grows with the position inside the current display page
    long sleeptime = this.eventTime + (this.query.maximumTime / this.query.displayResults() * ((item % this.query.displayResults()) + 1)) - System.currentTimeMillis();
    if ((anyWorkerAlive()) && (sleeptime > 0)) {
        try { Thread.sleep(sleeptime); } catch (InterruptedException e) {}
    }

    // if there are less than 10 more results available, sleep some extra time
    // to get a chance that the "common sense" ranking algorithm can work
    if ((this.resultList.size() <= item + 10) && (anyWorkerAlive())) {
        try { Thread.sleep(300); } catch (InterruptedException e) {}
    }

    // then sleep until any result is available (that should not happen)
    while ((this.resultList.size() <= item) && (anyWorkerAlive())) {
        try { Thread.sleep(100); } catch (InterruptedException e) {}
    }

    // finally, if there is something, return the result
    synchronized (this.resultList) {
        // check if we have enough entries
        if (this.resultList.size() <= item) return null;

        // fetch the best entry from the resultList, not the entry from item position
        // whenever a specific entry was switched in its position and was returned here
        // a moving pointer is set to assign that item position as not changeable
        int bestpick = postRankingFavourite(item);
        if (bestpick != item) {
            // switch the elements so the favourite lands on the requested position
            ResultEntry buf = (ResultEntry) this.resultList.get(bestpick);
            serverLog.logInfo("SEARCH_POSTRANKING", "prefering [" + bestpick + "] " + buf.urlstring() + " over [" + item + "] " + ((ResultEntry) this.resultList.get(item)).urlstring());
            this.resultList.set(bestpick, (ResultEntry) this.resultList.get(item));
            this.resultList.set(item, buf);
        }

        //this.resultListLock = item; // lock the element; be prepared to return it
        return (ResultEntry) this.resultList.get(item);
    }
}
2007-09-08 13:50:19 +02:00
private int postRankingFavourite ( int item ) {
// do a post-ranking on resultList, which should be locked upon time of this call
long rank , bestrank = 0 ;
int bestitem = item ;
ResultEntry entry ;
for ( int i = item ; i < this . resultList . size ( ) ; i + + ) {
entry = ( ResultEntry ) this . resultList . get ( i ) ;
rank = this . ranking . postRanking ( this . query , this . references ( 10 ) , entry , item ) ;
if ( rank > bestrank ) {
bestrank = rank ;
bestitem = i ;
}
}
return bestitem ;
}
/ *
public void removeRedundant ( ) {
// remove all urls from the pageAcc structure that occur double by specific redundancy rules
// a link is redundant, if a sub-path of the url is cited before. redundant urls are removed
// we find redundant urls by iteration over all elements in pageAcc
Iterator i = pageAcc . entrySet ( ) . iterator ( ) ;
HashMap paths = new HashMap ( ) ; // a url-subpath to pageAcc-key relation
Map . Entry entry ;
// first scan all entries and find all urls that are referenced
while ( i . hasNext ( ) ) {
entry = ( Map . Entry ) i . next ( ) ;
paths . put ( ( ( indexURLEntry ) entry . getValue ( ) ) . comp ( ) . url ( ) . toNormalform ( true , true ) , entry . getKey ( ) ) ;
//if (path != null) path = shortenPath(path);
//if (path != null) paths.put(path, entry.getKey());
}
// now scan the pageAcc again and remove all redundant urls
i = pageAcc . entrySet ( ) . iterator ( ) ;
String shorten ;
while ( i . hasNext ( ) ) {
entry = ( Map . Entry ) i . next ( ) ;
shorten = shortenPath ( ( ( indexURLEntry ) entry . getValue ( ) ) . comp ( ) . url ( ) . toNormalform ( true , true ) ) ;
// scan all subpaths of the url
while ( shorten ! = null ) {
if ( pageAcc . size ( ) < = query . wantedResults ) break ;
if ( paths . containsKey ( shorten ) ) {
//System.out.println("deleting path from search result: " + path + " is redundant to " + shorten);
try {
i . remove ( ) ;
} catch ( IllegalStateException e ) {
}
}
shorten = shortenPath ( shorten ) ;
}
}
}
private static String shortenPath ( String path ) {
int pos = path . lastIndexOf ( '/' ) ;
if ( pos < 0 ) return null ;
return path . substring ( 0 , pos ) ;
}
* /
2007-09-04 01:43:55 +02:00
public ArrayList completeResults ( long waitingtime ) {
long timeout = System . currentTimeMillis ( ) + waitingtime ;
while ( ( this . resultList . size ( ) < query . neededResults ( ) ) & & ( anyWorkerAlive ( ) ) & & ( System . currentTimeMillis ( ) < timeout ) ) {
try { Thread . sleep ( 200 ) ; } catch ( InterruptedException e ) { }
}
return this . resultList ;
}
boolean secondarySearchStartet = false ;
2007-08-25 01:12:59 +02:00
2006-09-13 19:13:28 +02:00
/**
 * Joins the index abstracts delivered by remote peers and, if the join
 * yields URL candidates, starts secondary remote searches that ask
 * specific peers for specific URLs. Runs at most once per search event,
 * and only after an abstract arrived for every query word.
 */
private void prepareSecondarySearch() {
    if (secondarySearchStartet) return; // dont do this twice

    // secondary search is only possible once one abstract per query word arrived
    if ((rcAbstracts == null) || (rcAbstracts.size() != query.queryHashes.size())) return; // secondary search not possible (yet)
    this.secondarySearchStartet = true;

    // catch up index abstracts and join them; then call peers again to submit their urls
    System.out.println("DEBUG-INDEXABSTRACT: " + rcAbstracts.size() + " word references catched, " + query.queryHashes.size() + " needed");

    Iterator i = rcAbstracts.entrySet().iterator();
    Map.Entry entry;
    while (i.hasNext()) {
        entry = (Map.Entry) i.next();
        System.out.println("DEBUG-INDEXABSTRACT: hash " + (String) entry.getKey() + ": " + ((query.queryHashes.contains((String) entry.getKey())) ? "NEEDED" : "NOT NEEDED") + "; " + ((TreeMap) entry.getValue()).size() + " entries");
    }

    // constructive join of all per-word (urlhash -> peerlist) maps
    TreeMap abstractJoin = (rcAbstracts.size() == query.queryHashes.size()) ? kelondroMSetTools.joinConstructive(rcAbstracts.values(), true) : new TreeMap();
    if (abstractJoin.size() == 0) {
        System.out.println("DEBUG-INDEXABSTRACT: no success using index abstracts from remote peers");
    } else {
        System.out.println("DEBUG-INDEXABSTRACT: index abstracts delivered " + abstractJoin.size() + " additional results for secondary search");
        // generate query for secondary search
        TreeMap secondarySearchURLs = new TreeMap(); // a (peerhash:urlhash-liststring) mapping
        Iterator i1 = abstractJoin.entrySet().iterator();
        Map.Entry entry1;
        String url, urls, peer, peers;
        String mypeerhash = yacyCore.seedDB.mySeed().hash;
        boolean mypeerinvolved = false;
        int mypeercount;
        while (i1.hasNext()) {
            entry1 = (Map.Entry) i1.next();
            url = (String) entry1.getKey();
            peers = (String) entry1.getValue();
            System.out.println("DEBUG-INDEXABSTRACT: url " + url + ": from peers " + peers);
            mypeercount = 0;
            // peers is a concatenation of fixed-width 12-character peer hashes
            for (int j = 0; j < peers.length(); j = j + 12) {
                peer = peers.substring(j, j + 12);
                // skip our own peer from the third occurrence on (post-increment:
                // the first two occurrences still fall through and get collected)
                if ((peer.equals(mypeerhash)) && (mypeercount++ > 1)) continue;
                //if (peers.indexOf(peer) < j) continue; // avoid doubles that may appear in the abstractJoin
                urls = (String) secondarySearchURLs.get(peer);
                urls = (urls == null) ? url : urls + url;
                secondarySearchURLs.put(peer, urls);
            }
            if (mypeercount == 1) mypeerinvolved = true;
        }
        // compute words for secondary search and start the secondary searches
        i1 = secondarySearchURLs.entrySet().iterator();
        String words;
        // NOTE(review): if our own peer occurred more than once, mypeerinvolved stays
        // false while secondarySearchURLs still contains our hash -- the array would
        // then keep one trailing null slot because the loop below skips us; verify
        // that downstream consumers tolerate null elements
        secondarySearchThreads = new yacySearch[(mypeerinvolved) ? secondarySearchURLs.size() - 1 : secondarySearchURLs.size()];
        int c = 0;
        while (i1.hasNext()) {
            entry1 = (Map.Entry) i1.next();
            peer = (String) entry1.getKey();
            if (peer.equals(mypeerhash)) continue; // we dont need to ask ourself
            urls = (String) entry1.getValue();
            words = wordsFromPeer(peer, urls);
            System.out.println("DEBUG-INDEXABSTRACT ***: peer " + peer + " has urls: " + urls);
            System.out.println("DEBUG-INDEXABSTRACT ***: peer " + peer + " from words: " + words);
            secondarySearchThreads[c++] = yacySearch.secondaryRemoteSearch(
                    words, "", urls, wordIndex, this.rankedCache, peer, plasmaSwitchboard.urlBlacklist,
                    ranking, query.constraint, preselectedPeerHashes);
        }
    }
}
private String wordsFromPeer ( String peerhash , String urls ) {
Map . Entry entry ;
String word , peerlist , url , wordlist = " " ;
TreeMap urlPeerlist ;
int p ;
boolean hasURL ;
synchronized ( rcAbstracts ) {
Iterator i = rcAbstracts . entrySet ( ) . iterator ( ) ;
while ( i . hasNext ( ) ) {
entry = ( Map . Entry ) i . next ( ) ;
word = ( String ) entry . getKey ( ) ;
urlPeerlist = ( TreeMap ) entry . getValue ( ) ;
hasURL = true ;
for ( int j = 0 ; j < urls . length ( ) ; j = j + 12 ) {
url = urls . substring ( j , j + 12 ) ;
peerlist = ( String ) urlPeerlist . get ( url ) ;
p = ( peerlist = = null ) ? - 1 : peerlist . indexOf ( peerhash ) ;
if ( ( p < 0 ) | | ( p % 12 ! = 0 ) ) {
hasURL = false ;
break ;
}
}
if ( hasURL ) wordlist + = word ;
}
}
return wordlist ;
}
2007-08-26 20:18:35 +02:00
public void remove ( String urlhash ) {
// removes the url hash reference from last search result
2007-09-04 01:43:55 +02:00
/*indexRWIEntry e =*/ this . rankedCache . remove ( urlhash ) ;
//assert e != null;
2007-08-26 20:18:35 +02:00
}
2006-09-13 19:13:28 +02:00
2007-09-08 13:50:19 +02:00
public Set references ( int count ) {
// returns a set of words that are computed as toplist
2007-09-07 13:45:38 +02:00
return this . rankedCache . getReferences ( count ) ;
2007-08-28 14:15:46 +02:00
}
2007-09-04 01:43:55 +02:00
/**
 * A single search hit: the URL database record plus the computed text and
 * media snippets, together with retrieval statistics. The constructor also
 * resolves ".yacyh" pseudo-hosts (files shared on remote peers) into a
 * currently reachable peer address, and cleans dead share references out
 * of the index.
 */
public static class ResultEntry {
    // payload objects
    private indexURLEntry urlentry;            // the raw URL database record
    private indexURLEntry.Components urlcomps; // buffer for components
    private String alternative_urlstring;      // resolved URL for .yacyh hosts, else null
    private String alternative_urlname;        // display name for .yacyh hosts, else null
    private plasmaSnippetCache.TextSnippet textSnippet;
    private ArrayList /* of plasmaSnippetCache.MediaSnippet */ mediaSnippets;

    // statistic objects
    // time spent fetching the db record / computing the snippet
    // (presumably milliseconds -- TODO confirm against caller)
    public long dbRetrievalTime, snippetComputationTime;

    /**
     * @param urlentry   the URL database record for this hit
     * @param wordIndex  used to clean up the index if a .yacyh host cannot be resolved
     * @param textSnippet            precomputed text snippet (may be null)
     * @param mediaSnippets          precomputed media snippets (may be null)
     * @param dbRetrievalTime        time spent on db retrieval
     * @param snippetComputationTime time spent on snippet computation
     * @throws RuntimeException "index void" if a .yacyh share host is unknown
     *         (the stale entry is removed from the index first)
     */
    public ResultEntry(indexURLEntry urlentry, plasmaWordIndex wordIndex, plasmaSnippetCache.TextSnippet textSnippet, ArrayList mediaSnippets,
                       long dbRetrievalTime, long snippetComputationTime) {
        this.urlentry = urlentry;
        this.urlcomps = urlentry.comp();
        this.alternative_urlstring = null;
        this.alternative_urlname = null;
        this.textSnippet = textSnippet;
        this.mediaSnippets = mediaSnippets;
        this.dbRetrievalTime = dbRetrievalTime;
        this.snippetComputationTime = snippetComputationTime;
        // NOTE(review): getHost() might return null for exotic URLs -- confirm
        String host = urlcomps.url().getHost();
        if (host.endsWith(".yacyh")) {
            // translate host into current IP
            int p = host.indexOf(".");
            String hash = yacySeed.hexHash2b64Hash(host.substring(p + 1, host.length() - 6));
            yacySeed seed = yacyCore.seedDB.getConnected(hash);
            String filename = urlcomps.url().getFile();
            String address = null;
            if ((seed == null) || ((address = seed.getPublicAddress()) == null)) {
                // seed is not known from here; purge the dead share reference
                // from the word index and the loaded-URL db, then abort
                try {
                    wordIndex.removeWordReferences(
                        plasmaCondenser.getWords(
                            ("yacyshare " +
                             filename.replace('?', ' ') +
                             " " +
                             urlcomps.title()).getBytes(), "UTF-8").keySet(),
                            urlentry.hash());
                    wordIndex.loadedURL.remove(urlentry.hash()); // clean up
                    throw new RuntimeException("index void");
                } catch (UnsupportedEncodingException e) {
                    throw new RuntimeException("parser failed: " + e.getMessage());
                }
            }
            alternative_urlstring = "http://" + address + "/" + host.substring(0, p) + filename;
            alternative_urlname = "http://share." + seed.getName() + ".yacy" + filename;
            // strip the query part from the display name
            if ((p = alternative_urlname.indexOf("?")) > 0) alternative_urlname = alternative_urlname.substring(0, p);
        }
    }

    /** Returns the URL hash of this hit. */
    public String hash() {
        return urlentry.hash();
    }

    /** Returns the URL of this hit. */
    public yacyURL url() {
        return urlcomps.url();
    }

    /** Returns the flags bitfield stored with the URL entry. */
    public kelondroBitfield flags() {
        return urlentry.flags();
    }

    /** Returns the URL as string, preferring the resolved .yacyh address. */
    public String urlstring() {
        return (alternative_urlstring == null) ? urlcomps.url().toNormalform(false, true) : alternative_urlstring;
    }

    /** Returns the display name of the URL, preferring the .yacyh share name. */
    public String urlname() {
        return (alternative_urlname == null) ? urlcomps.url().toNormalform(false, true) : alternative_urlname;
    }

    /** Returns the document title. */
    public String title() {
        return urlcomps.title();
    }

    /** Returns the text snippet, or null if none was computed. */
    public plasmaSnippetCache.TextSnippet textSnippet() {
        return this.textSnippet;
    }

    /** Returns the media snippets, or null if none were computed. */
    public ArrayList /* of plasmaSnippetCache.MediaSnippet */ mediaSnippets() {
        return this.mediaSnippets;
    }

    /** Returns the modification date of the document. */
    public Date modified() {
        return urlentry.moddate();
    }

    /** Returns the document size in bytes. */
    public int filesize() {
        return urlentry.size();
    }

    /** Returns the number of image links in the document. */
    public int limage() {
        return urlentry.limage();
    }

    /** Returns the number of audio links in the document. */
    public int laudio() {
        return urlentry.laudio();
    }

    /** Returns the number of video links in the document. */
    public int lvideo() {
        return urlentry.lvideo();
    }

    /** Returns the number of application links in the document. */
    public int lapp() {
        return urlentry.lapp();
    }

    /** Returns the word reference entry attached to the URL entry. */
    public indexRWIEntry word() {
        return urlentry.word();
    }

    /**
     * True if a usable text snippet exists.
     * Error codes >= 11 apparently mark unusable snippets -- confirm
     * against plasmaSnippetCache error-code definitions.
     */
    public boolean hasTextSnippet() {
        return (this.textSnippet != null) && (this.textSnippet.getErrorCode() < 11);
    }

    /** True if at least one media snippet exists. */
    public boolean hasMediaSnippets() {
        return (this.mediaSnippets != null) && (this.mediaSnippets.size() > 0);
    }

    /** Generates the transport resource string, including the snippet line if present. */
    public String resource() {
        // generate transport resource
        if ((textSnippet != null) && (textSnippet.exists())) {
            return urlentry.toString(textSnippet.getLineRaw());
        } else {
            return urlentry.toString();
        }
    }
}
2005-10-10 02:32:15 +02:00
}