2005-10-10 02:32:15 +02:00
// plasmaSearchEvent.java
// -----------------------
// part of YACY
// (C) by Michael Peter Christen; mc@anomic.de
// first published on http://www.anomic.de
// Frankfurt, Germany, 2005
// Created: 10.10.2005
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// Using this software in any meaning (reading, learning, copying, compiling,
// running) means that you agree that the Author(s) is (are) not responsible
// for cost, loss of data or any harm that may be caused directly or indirectly
// by usage of this softare or this documentation. The usage of this software
// is on your own risk. The installation and usage (starting/running) of this
// software may allow other people or application to access your computer and
// any attached devices and is highly dependent on the configuration of the
// software which must be done by the user of the software; the author(s) is
// (are) also not responsible for proper configuration and usage of the
// software, even if provoked by documentation provided together with
// the software.
//
// Any changes to this file according to the GPL as documented in the file
// gpl.txt aside this file in the shipment you received can be done to the
// lines that follows this copyright notice here, but changes must not be
// done inside the copyright notive above. A re-distribution must contain
// the intact and unchanged copyright notice.
// Contributions and changes to the program code must be marked as such.
package de.anomic.plasma ;
2006-09-11 00:36:47 +02:00
import java.util.Collection ;
2006-11-06 03:05:39 +01:00
import java.util.HashMap ;
2006-09-30 00:27:20 +02:00
import java.util.HashSet ;
2005-10-11 09:06:33 +02:00
import java.util.Iterator ;
2006-09-11 00:36:47 +02:00
import java.util.Map ;
import java.util.Set ;
2006-09-11 12:39:25 +02:00
import java.util.TreeMap ;
2005-10-12 14:28:49 +02:00
2006-09-30 00:27:20 +02:00
import de.anomic.index.indexContainer ;
2007-03-21 16:35:35 +01:00
import de.anomic.index.indexRWIEntry ;
2006-11-08 17:17:47 +01:00
import de.anomic.index.indexURLEntry ;
2005-10-12 14:28:49 +02:00
import de.anomic.kelondro.kelondroException ;
2006-09-12 13:13:27 +02:00
import de.anomic.kelondro.kelondroMSetTools ;
2005-10-12 14:28:49 +02:00
import de.anomic.server.logging.serverLog ;
2006-09-16 02:07:09 +02:00
import de.anomic.yacy.yacyCore ;
2005-10-13 15:57:15 +02:00
import de.anomic.yacy.yacySearch ;
2005-10-10 02:32:15 +02:00
2005-12-15 14:21:42 +01:00
public final class plasmaSearchEvent extends Thread implements Runnable {

    // the most recently completed search event; read by callers of resultProfile()
    public static plasmaSearchEvent lastEvent = null;

    // keeps every event whose result-flush thread is still running so the event is
    // not garbage-collected while flushing; also serves as the lock that serializes
    // local searches (see the synchronized block in search())
    private static HashSet flushThreads = new HashSet();

    private serverLog log;
    private plasmaSearchQuery query;
    private plasmaSearchRankingProfile ranking;
    private plasmaWordIndex wordIndex;
    private plasmaCrawlLURL urlStore;
    private plasmaSnippetCache snippetCache;
    private indexContainer rcContainers; // cache for results
    private int rcContainerFlushCount;   // total number of remote result entries flushed into the word index
    private Map rcAbstracts; // cache for index abstracts; word:TreeMap mapping where the embedded TreeMap is a urlhash:peerlist relation
    private plasmaSearchTimingProfile profileLocal, profileGlobal;
    private boolean postsort;
    private yacySearch[] primarySearchThreads, secondarySearchThreads;
    private long searchtime;  // duration of the last search() in ms; -1 until a search has run
    private int searchcount;  // filtered result count of the last search(); -1 until a search has run

    /**
     * Creates a new search event for one query.
     *
     * @param query        the parsed search query (hashes, limits, constraints)
     * @param ranking      ranking profile used for pre- and post-ordering
     * @param localTiming  time budget for the local search phases
     * @param remoteTiming time budget for the remote (DHT) search phases
     * @param postsort     whether a post-sort of fetched pages shall be performed
     * @param log          logger for search progress messages
     * @param wordIndex    the local reverse word index
     * @param urlStore     store used to resolve url hashes to URL entries
     * @param snippetCache cache passed on to the remote search threads
     */
    public plasmaSearchEvent(plasmaSearchQuery query,
                             plasmaSearchRankingProfile ranking,
                             plasmaSearchTimingProfile localTiming,
                             plasmaSearchTimingProfile remoteTiming,
                             boolean postsort,
                             serverLog log,
                             plasmaWordIndex wordIndex,
                             plasmaCrawlLURL urlStore,
                             plasmaSnippetCache snippetCache) {
        this.log = log;
        this.wordIndex = wordIndex;
        this.query = query;
        this.ranking = ranking;
        this.urlStore = urlStore;
        this.snippetCache = snippetCache;
        this.rcContainers = wordIndex.emptyContainer(null);
        this.rcContainerFlushCount = 0;
        // generate abstracts only for combined searches
        this.rcAbstracts = (query.size() > 1) ? new TreeMap() : null;
        this.profileLocal = localTiming;
        this.profileGlobal = remoteTiming;
        this.postsort = postsort;
        this.primarySearchThreads = null;
        this.secondarySearchThreads = null;
        this.searchtime = -1;
        this.searchcount = -1;
    }
public plasmaSearchQuery getQuery ( ) {
return query ;
}
2006-02-05 00:51:00 +01:00
public plasmaSearchTimingProfile getLocalTiming ( ) {
2005-10-27 02:42:08 +02:00
return profileLocal ;
}
2006-09-13 19:13:28 +02:00
public yacySearch [ ] getPrimarySearchThreads ( ) {
return primarySearchThreads ;
}
public yacySearch [ ] getSecondarySearchThreads ( ) {
return secondarySearchThreads ;
2005-10-24 02:34:15 +02:00
}
2007-01-15 02:50:57 +01:00
public HashMap resultProfile ( ) {
// generate statistics about search: query, time, etc
HashMap r = new HashMap ( ) ;
r . put ( " queryhashes " , query . queryHashes ) ;
2007-04-05 12:14:48 +02:00
r . put ( " querystring " , query . queryString ) ;
2007-01-15 02:50:57 +01:00
r . put ( " querycount " , new Integer ( query . wantedResults ) ) ;
r . put ( " querytime " , new Long ( query . maximumTime ) ) ;
r . put ( " resultcount " , new Integer ( this . searchcount ) ) ;
r . put ( " resulttime " , new Long ( this . searchtime ) ) ;
return r ;
}
2007-01-19 01:38:03 +01:00
/**
 * Executes the search described by the query: for a global DHT search it starts
 * remote search threads, runs the local search in parallel, optionally triggers a
 * secondary search based on index abstracts, and finally merges and orders all
 * results. Only one search runs at a time (serialized on flushThreads).
 *
 * @return the ordered search result; also updates lastEvent / searchtime / searchcount
 */
public plasmaSearchPostOrder search() {
    // combine all threads

    // we synchronize with flushThreads to allow only one local search at a time,
    // so all search tasks are queued
    synchronized (flushThreads) {
        long start = System.currentTimeMillis();
        plasmaSearchPostOrder result;
        if (query.domType == plasmaSearchQuery.SEARCHDOM_GLOBALDHT) {
            int fetchpeers = (int) (query.maximumTime / 500L); // number of target peers; means 10 peers in 10 seconds
            if (fetchpeers > 50) fetchpeers = 50;
            if (fetchpeers < 30) fetchpeers = 30;
            // do a global search
            // the result of the fetch is then in the rcGlobal
            log.logFine("STARTING " + fetchpeers + " THREADS TO CATCH EACH " + profileGlobal.getTargetCount(plasmaSearchTimingProfile.PROCESS_POSTSORT) + " URLs WITHIN " + (profileGlobal.duetime() / 1000) + " SECONDS");
            // secondary searches get two thirds of the global time budget; primaries the full budget
            long secondaryTimeout = System.currentTimeMillis() + profileGlobal.duetime() / 3 * 2;
            long primaryTimeout = System.currentTimeMillis() + profileGlobal.duetime();
            primarySearchThreads = yacySearch.primaryRemoteSearches(plasmaSearchQuery.hashSet2hashString(query.queryHashes), "",
                    query.prefer, query.urlMask, query.maxDistance, urlStore, wordIndex, rcContainers, rcAbstracts,
                    fetchpeers, plasmaSwitchboard.urlBlacklist, snippetCache, profileGlobal, ranking, query.constraint);
            // meanwhile do a local search
            Map searchContainerMap = localSearchContainers(null);
            // use the search containers to fill up rcAbstracts locally
            /*
            if ((rcAbstracts != null) && (searchContainerMap != null)) {
                Iterator i, ci = searchContainerMap.entrySet().iterator();
                Map.Entry entry;
                String wordhash;
                indexContainer container;
                TreeMap singleAbstract;
                String mypeerhash = yacyCore.seedDB.mySeed.hash;
                while (ci.hasNext()) {
                    entry = (Map.Entry) ci.next();
                    wordhash = (String) entry.getKey();
                    container = (indexContainer) entry.getValue();
                    // collect all urlhashes from the container
                    synchronized (rcAbstracts) {
                        singleAbstract = (TreeMap) rcAbstracts.get(wordhash); // a mapping from url-hashes to a string of peer-hashes
                        if (singleAbstract == null) singleAbstract = new TreeMap();
                        i = container.entries();
                        while (i.hasNext()) singleAbstract.put(((indexEntry) i.next()).urlHash(), mypeerhash);
                        rcAbstracts.put(wordhash, singleAbstract);
                    }
                }
            }
            */
            // try to pre-fetch some LURLs if there is enough time
            // NOTE(review): unlike the local-only branch below, searchContainerMap is not
            // null-checked here; localSearchContainers() appears to always return a map — confirm
            indexContainer rcLocal = localSearchJoin(searchContainerMap.values());
            prefetchLocal(rcLocal, secondaryTimeout);
            // this is temporary debugging code to learn that the index abstracts are fetched correctly
            while (System.currentTimeMillis() < secondaryTimeout) {
                if (yacySearch.remainingWaiting(primarySearchThreads) == 0) break; // all threads have finished
                try { Thread.sleep(100); } catch (InterruptedException e) {}
            }
            // evaluate index abstracts and start a secondary search
            if (rcAbstracts != null) prepareSecondarySearch();
            // catch up global results:
            // wait until primary timeout passed
            while (System.currentTimeMillis() < primaryTimeout) {
                if ((yacySearch.remainingWaiting(primarySearchThreads) == 0) &&
                    ((secondarySearchThreads == null) || (yacySearch.remainingWaiting(secondarySearchThreads) == 0))) break; // all threads have finished
                try { Thread.sleep(100); } catch (InterruptedException e) {}
            }
            int globalContributions = rcContainers.size();
            // finished searching
            log.logFine("SEARCH TIME AFTER GLOBAL-TRIGGER TO " + fetchpeers + " PEERS: " + ((System.currentTimeMillis() - start) / 1000) + " seconds");
            // combine the result and order
            result = orderFinal(rcLocal);
            if (result != null) {
                result.globalContributions = globalContributions;

                // flush results in a separate thread
                this.start(); // start to flush results
            }
        } else {
            // local-only search: no remote threads, no abstracts, no flushing
            Map searchContainerMap = localSearchContainers(null);
            indexContainer rcLocal = (searchContainerMap == null) ? wordIndex.emptyContainer(null) : localSearchJoin(searchContainerMap.values());
            result = orderFinal(rcLocal);
            result.globalContributions = 0;
        }
        // log the event
        log.logFine("SEARCHRESULT: " + profileLocal.reportToString());
        // prepare values for statistics
        lastEvent = this;
        this.searchtime = System.currentTimeMillis() - start;
        this.searchcount = result.filteredResults;
        // return search result
        return result;
    }
}
/**
 * Evaluates the index abstracts collected from remote peers and, when every query
 * word delivered an abstract, computes their constructive join and starts secondary
 * remote searches that ask specific peers for specific url hashes.
 * Peer hashes and url hashes are fixed-width 12-character strings packed into
 * plain String lists (see the substring(j, j + 12) slicing below).
 */
private void prepareSecondarySearch() {
    // catch up index abstracts and join them; then call peers again to submit their urls
    System.out.println("DEBUG-INDEXABSTRACT: " + rcAbstracts.size() + " word references catched, " + query.size() + " needed");
    if (rcAbstracts.size() != query.size()) return; // secondary search not possible
    Iterator i = rcAbstracts.entrySet().iterator();
    Map.Entry entry;
    while (i.hasNext()) {
        entry = (Map.Entry) i.next();
        System.out.println("DEBUG-INDEXABSTRACT: hash " + (String) entry.getKey() + ": " + ((query.queryHashes.contains((String) entry.getKey())) ? "NEEDED" : "NOT NEEDED") + "; " + ((TreeMap) entry.getValue()).size() + " entries");
    }
    // join all per-word url->peerlist maps; urls surviving the join are known to all words
    TreeMap abstractJoin = (rcAbstracts.size() == query.size()) ? kelondroMSetTools.joinConstructive(rcAbstracts.values(), true) : new TreeMap();
    if (abstractJoin.size() == 0) {
        System.out.println("DEBUG-INDEXABSTRACT: no success using index abstracts from remote peers");
    } else {
        System.out.println("DEBUG-INDEXABSTRACT: index abstracts delivered " + abstractJoin.size() + " additional results for secondary search");
        // generate query for secondary search
        TreeMap secondarySearchURLs = new TreeMap(); // a (peerhash:urlhash-liststring) mapping
        Iterator i1 = abstractJoin.entrySet().iterator();
        Map.Entry entry1;
        String url, urls, peer, peers;
        String mypeerhash = yacyCore.seedDB.mySeed.hash;
        boolean mypeerinvolved = false;
        int mypeercount;
        while (i1.hasNext()) {
            entry1 = (Map.Entry) i1.next();
            url = (String) entry1.getKey();
            peers = (String) entry1.getValue();
            System.out.println("DEBUG-INDEXABSTRACT: url " + url + ": from peers " + peers);
            mypeercount = 0;
            for (int j = 0; j < peers.length(); j = j + 12) {
                peer = peers.substring(j, j + 12);
                // skip repeated occurrences of our own peer hash (first two are still counted)
                if ((peer.equals(mypeerhash)) && (mypeercount++ > 1)) continue;
                //if (peers.indexOf(peer) < j) continue; // avoid doubles that may appear in the abstractJoin
                urls = (String) secondarySearchURLs.get(peer);
                urls = (urls == null) ? url : urls + url;
                secondarySearchURLs.put(peer, urls);
            }
            if (mypeercount == 1) mypeerinvolved = true;
        }
        // compute words for secondary search and start the secondary searches
        i1 = secondarySearchURLs.entrySet().iterator();
        String words;
        // our own peer gets no thread, hence size - 1 when we are involved
        secondarySearchThreads = new yacySearch[(mypeerinvolved) ? secondarySearchURLs.size() - 1 : secondarySearchURLs.size()];
        int c = 0;
        while (i1.hasNext()) {
            entry1 = (Map.Entry) i1.next();
            peer = (String) entry1.getKey();
            if (peer.equals(mypeerhash)) continue; // we dont need to ask ourself
            urls = (String) entry1.getValue();
            words = wordsFromPeer(peer, urls);
            System.out.println("DEBUG-INDEXABSTRACT ***: peer " + peer + " has urls: " + urls);
            System.out.println("DEBUG-INDEXABSTRACT ***: peer " + peer + " from words: " + words);
            secondarySearchThreads[c++] = yacySearch.secondaryRemoteSearch(
                    words, urls, urlStore, wordIndex, rcContainers, peer, plasmaSwitchboard.urlBlacklist, snippetCache,
                    profileGlobal, ranking, query.constraint);
        }
    }
}
private String wordsFromPeer ( String peerhash , String urls ) {
Map . Entry entry ;
String word , peerlist , url , wordlist = " " ;
TreeMap urlPeerlist ;
int p ;
boolean hasURL ;
synchronized ( rcAbstracts ) {
Iterator i = rcAbstracts . entrySet ( ) . iterator ( ) ;
while ( i . hasNext ( ) ) {
entry = ( Map . Entry ) i . next ( ) ;
word = ( String ) entry . getKey ( ) ;
urlPeerlist = ( TreeMap ) entry . getValue ( ) ;
hasURL = true ;
for ( int j = 0 ; j < urls . length ( ) ; j = j + 12 ) {
url = urls . substring ( j , j + 12 ) ;
peerlist = ( String ) urlPeerlist . get ( url ) ;
p = ( peerlist = = null ) ? - 1 : peerlist . indexOf ( peerhash ) ;
if ( ( p < 0 ) | | ( p % 12 ! = 0 ) ) {
hasURL = false ;
break ;
}
}
if ( hasURL ) wordlist + = word ;
}
}
return wordlist ;
}
2006-09-11 00:36:47 +02:00
public Map localSearchContainers ( Set urlselection ) {
2006-09-11 13:12:42 +02:00
// search for the set of hashes and return a map of of wordhash:indexContainer containing the seach result
2006-04-02 20:51:18 +02:00
2006-04-03 17:36:53 +02:00
// retrieve entities that belong to the hashes
profileLocal . startTimer ( ) ;
2006-09-11 00:36:47 +02:00
Map containers = wordIndex . getContainers (
2006-04-03 17:36:53 +02:00
query . queryHashes ,
2006-09-11 00:36:47 +02:00
urlselection ,
2006-04-03 17:36:53 +02:00
true ,
true ,
profileLocal . getTargetTime ( plasmaSearchTimingProfile . PROCESS_COLLECTION ) ) ;
2006-11-06 03:05:39 +01:00
if ( ( containers . size ( ) ! = 0 ) & & ( containers . size ( ) < query . size ( ) ) ) containers = new HashMap ( ) ; // prevent that only a subset is returned
2006-04-03 17:36:53 +02:00
profileLocal . setYieldTime ( plasmaSearchTimingProfile . PROCESS_COLLECTION ) ;
2006-11-06 03:05:39 +01:00
profileLocal . setYieldCount ( plasmaSearchTimingProfile . PROCESS_COLLECTION , containers . size ( ) ) ;
2006-04-02 20:51:18 +02:00
2006-09-06 19:51:28 +02:00
return containers ;
}
2006-09-11 00:36:47 +02:00
public indexContainer localSearchJoin ( Collection containers ) {
2006-09-06 19:51:28 +02:00
// join a search result and return the joincount (number of pages after join)
// since this is a conjunction we return an empty entity if any word is not known
2006-11-05 20:07:19 +01:00
if ( containers = = null ) return wordIndex . emptyContainer ( null ) ;
2006-04-03 17:36:53 +02:00
// join the result
profileLocal . startTimer ( ) ;
2006-09-12 13:13:27 +02:00
indexContainer rcLocal = indexContainer . joinContainer ( containers ,
2006-04-03 17:36:53 +02:00
profileLocal . getTargetTime ( plasmaSearchTimingProfile . PROCESS_JOIN ) ,
query . maxDistance ) ;
2006-11-06 03:05:39 +01:00
if ( rcLocal = = null ) rcLocal = wordIndex . emptyContainer ( null ) ;
2006-04-03 17:36:53 +02:00
profileLocal . setYieldTime ( plasmaSearchTimingProfile . PROCESS_JOIN ) ;
2006-11-06 03:05:39 +01:00
profileLocal . setYieldCount ( plasmaSearchTimingProfile . PROCESS_JOIN , rcLocal . size ( ) ) ;
2006-04-03 17:36:53 +02:00
2006-09-06 19:51:28 +02:00
return rcLocal ;
2005-10-10 02:32:15 +02:00
}
2007-01-19 01:38:03 +01:00
/**
 * Merges the local and the (so far fetched) global result containers, pre-orders
 * them by ranking, fetches the URL entries within the post-sort time budget, applies
 * exclusion words and index-page constraints, and returns the post-ordered result.
 * Attention: if minEntries is too high, this method will not terminate within the maxTime.
 *
 * @param rcLocal the joined local search result; must not be null
 * @return the accumulated, sorted and filtered result pages
 */
public plasmaSearchPostOrder orderFinal(indexContainer rcLocal) {
    // we collect the urlhashes and construct a list with urlEntry objects
    assert (rcLocal != null);
    indexContainer searchResult = wordIndex.emptyContainer(null);
    long preorderTime = profileLocal.getTargetTime(plasmaSearchTimingProfile.PROCESS_PRESORT);
    profileLocal.startTimer();
    long pst = System.currentTimeMillis();
    // merge local and remote results, then deduplicate
    searchResult.addAllUnique(rcLocal);
    searchResult.addAllUnique(rcContainers);
    searchResult.sort();
    searchResult.uniq();
    // spend at most the remaining presort budget on pre-ordering; minimum 200 ms
    preorderTime = preorderTime - (System.currentTimeMillis() - pst);
    if (preorderTime < 0) preorderTime = 200;
    plasmaSearchPreOrder preorder = new plasmaSearchPreOrder(query, ranking, searchResult, preorderTime);
    if (searchResult.size() > query.wantedResults) preorder.remove(true, true);
    profileLocal.setYieldTime(plasmaSearchTimingProfile.PROCESS_PRESORT);
    profileLocal.setYieldCount(plasmaSearchTimingProfile.PROCESS_PRESORT, rcLocal.size());

    // start url-fetch
    long postorderTime = profileLocal.getTargetTime(plasmaSearchTimingProfile.PROCESS_POSTSORT);
    System.out.println("DEBUG: postorder-final (urlfetch) maxtime = " + postorderTime);
    long postorderLimitTime = (postorderTime < 0) ? Long.MAX_VALUE : (System.currentTimeMillis() + postorderTime);
    profileLocal.startTimer();
    plasmaSearchPostOrder acc = new plasmaSearchPostOrder(query, ranking);

    indexRWIEntry entry;
    indexURLEntry page;
    Long preranking;
    Object[] preorderEntry;
    indexURLEntry.Components comp;
    String pagetitle, pageurl, pageauthor;
    int minEntries = profileLocal.getTargetCount(plasmaSearchTimingProfile.PROCESS_POSTSORT);
    try {
        ordering: while (preorder.hasNext()) {
            // stop when the time budget is used up AND enough entries were fetched
            if ((System.currentTimeMillis() >= postorderLimitTime) && (acc.sizeFetched() >= minEntries)) break;
            preorderEntry = preorder.next();
            entry = (indexRWIEntry) preorderEntry[0];
            // load only urls if there was not yet a root url of that hash
            preranking = (Long) preorderEntry[1];
            // find the url entry
            page = urlStore.load(entry.urlHash(), entry);
            if (page != null) {
                comp = page.comp();
                pagetitle = comp.title().toLowerCase();
                pageurl = comp.url().toString().toLowerCase();
                pageauthor = comp.author().toLowerCase();
                // check exclusion: drop pages matching any exclude word in title, url or author
                if (plasmaSearchQuery.matches(pagetitle, query.excludeHashes)) continue ordering;
                if (plasmaSearchQuery.matches(pageurl, query.excludeHashes)) continue ordering;
                if (plasmaSearchQuery.matches(pageauthor, query.excludeHashes)) continue ordering;
                // check constraints: an indexof-constrained query only accepts "Index of" pages;
                // mismatches are treated as bad index entries and removed from the word index
                if ((!(query.constraint.equals(plasmaSearchQuery.catchall_constraint))) &&
                    (query.constraint.get(plasmaCondenser.flag_cat_indexof)) &&
                    (!(comp.title().startsWith("Index of")))) {
                    log.logFine("filtered out " + comp.url().toString());
                    // filter out bad results
                    Iterator wi = query.queryHashes.iterator();
                    while (wi.hasNext()) wordIndex.removeEntry((String) wi.next(), page.hash());
                } else if (query.contentdom != plasmaSearchQuery.CONTENTDOM_TEXT) {
                    // media searches only accept pages that link to the requested media type
                    if ((query.contentdom == plasmaSearchQuery.CONTENTDOM_AUDIO) && (page.laudio() > 0)) acc.addPage(page, preranking);
                    else if ((query.contentdom == plasmaSearchQuery.CONTENTDOM_VIDEO) && (page.lvideo() > 0)) acc.addPage(page, preranking);
                    else if ((query.contentdom == plasmaSearchQuery.CONTENTDOM_IMAGE) && (page.limage() > 0)) acc.addPage(page, preranking);
                    else if ((query.contentdom == plasmaSearchQuery.CONTENTDOM_APP) && (page.lapp() > 0)) acc.addPage(page, preranking);
                } else {
                    acc.addPage(page, preranking);
                }
            }
        }
    } catch (kelondroException ee) {
        serverLog.logSevere("PLASMA", "Database Failure during plasmaSearch.order: " + ee.getMessage(), ee);
    }
    profileLocal.setYieldTime(plasmaSearchTimingProfile.PROCESS_URLFETCH);
    profileLocal.setYieldCount(plasmaSearchTimingProfile.PROCESS_URLFETCH, acc.sizeFetched());
    // start postsorting
    profileLocal.startTimer();
    acc.sortPages(postsort);
    profileLocal.setYieldTime(plasmaSearchTimingProfile.PROCESS_POSTSORT);
    profileLocal.setYieldCount(plasmaSearchTimingProfile.PROCESS_POSTSORT, acc.sizeOrdered());
    // apply filter
    profileLocal.startTimer();
    acc.removeRedundant();
    profileLocal.setYieldTime(plasmaSearchTimingProfile.PROCESS_FILTER);
    profileLocal.setYieldCount(plasmaSearchTimingProfile.PROCESS_FILTER, acc.sizeOrdered());
    acc.localContributions = (rcLocal == null) ? 0 : rcLocal.size();
    acc.filteredResults = preorder.filteredCount();
    return acc;
}
private void prefetchLocal ( indexContainer rcLocal , long timeout ) {
// pre-fetch some urls to fill LURL ram cache
2006-09-06 19:51:28 +02:00
2006-11-05 03:10:40 +01:00
if ( rcLocal = = null ) return ;
2006-09-13 19:13:28 +02:00
plasmaSearchPreOrder preorder = new plasmaSearchPreOrder ( query , ranking , rcLocal , timeout - System . currentTimeMillis ( ) ) ;
2007-04-03 17:35:29 +02:00
if ( preorder . filteredCount ( ) > query . wantedResults ) preorder . remove ( true , true ) ;
2006-09-06 19:51:28 +02:00
// start url-fetch
2007-03-21 16:35:35 +01:00
indexRWIEntry entry ;
2006-09-06 19:51:28 +02:00
try {
while ( preorder . hasNext ( ) ) {
2006-09-13 19:13:28 +02:00
if ( System . currentTimeMillis ( ) > = timeout ) break ;
2007-03-21 16:35:35 +01:00
entry = ( indexRWIEntry ) ( preorder . next ( ) [ 0 ] ) ;
2006-09-16 02:07:09 +02:00
// find and fetch the url entry
urlStore . load ( entry . urlHash ( ) , entry ) ;
2005-12-15 11:31:00 +01:00
}
} catch ( kelondroException ee ) {
serverLog . logSevere ( " PLASMA " , " Database Failure during plasmaSearch.order: " + ee . getMessage ( ) , ee ) ;
}
2005-10-12 14:28:49 +02:00
}
2005-12-15 14:21:42 +01:00
public void run ( ) {
flushThreads . add ( this ) ; // this will care that the search event object is referenced from somewhere while it is still alive
2006-01-31 02:20:28 +01:00
2005-10-13 15:57:15 +02:00
// put all new results into wordIndex
// this must be called after search results had been computed
2006-01-31 02:20:28 +01:00
// it is wise to call this within a separate thread because
// this method waits until all threads are finished
2007-04-05 12:14:48 +02:00
serverLog . logFine ( " PLASMA " , " STARTED FLUSHING GLOBAL SEARCH RESULTS FOR SEARCH " + query . queryString ) ;
2006-10-04 00:55:59 +02:00
2006-09-13 19:13:28 +02:00
int remaining = 0 ;
if ( primarySearchThreads = = null ) return ;
2005-10-13 15:57:15 +02:00
long starttime = System . currentTimeMillis ( ) ;
2006-09-13 19:13:28 +02:00
while ( true ) {
2006-10-25 04:24:41 +02:00
flushGlobalResults ( ) ; // must be flushed before first check of remaining threads, othervise it is possible that NO results are flushed at all
2006-09-13 19:13:28 +02:00
remaining = yacySearch . remainingWaiting ( primarySearchThreads ) ;
if ( secondarySearchThreads ! = null ) remaining + = yacySearch . remainingWaiting ( secondarySearchThreads ) ;
if ( remaining = = 0 ) break ;
2006-01-31 02:20:28 +01:00
2005-10-23 19:50:27 +02:00
// wait a little bit before trying again
2006-09-13 19:13:28 +02:00
try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { }
2005-10-13 15:57:15 +02:00
if ( System . currentTimeMillis ( ) - starttime > 90000 ) {
2006-09-13 19:13:28 +02:00
yacySearch . interruptAlive ( primarySearchThreads ) ;
if ( secondarySearchThreads ! = null ) yacySearch . interruptAlive ( secondarySearchThreads ) ;
2007-04-05 12:14:48 +02:00
log . logFine ( " SEARCH FLUSH: " + remaining + " PEERS STILL BUSY; ABANDONED; SEARCH WAS " + query . queryString ) ;
2005-10-13 15:57:15 +02:00
break ;
}
2006-09-06 19:51:28 +02:00
//log.logFine("FINISHED FLUSH RESULTS PROCESS for query " + query.hashes(","));
2005-10-13 15:57:15 +02:00
}
2007-04-05 12:14:48 +02:00
serverLog . logFine ( " PLASMA " , " FINISHED FLUSHING " + rcContainerFlushCount + " GLOBAL SEARCH RESULTS FOR SEARCH " + query . queryString ) ;
2006-01-31 02:20:28 +01:00
2005-10-13 15:57:15 +02:00
// finally delete the temporary index
2006-09-11 12:39:25 +02:00
rcContainers = null ;
2006-01-31 02:20:28 +01:00
flushThreads . remove ( this ) ;
}
2006-02-04 15:13:54 +01:00
public void flushGlobalResults ( ) {
2006-01-31 02:20:28 +01:00
// flush the rcGlobal as much as is there so far
// this must be called sometime after search results had been computed
int count = 0 ;
2006-09-11 12:39:25 +02:00
if ( ( rcContainers ! = null ) & & ( rcContainers . size ( ) > 0 ) ) {
synchronized ( rcContainers ) {
2006-01-31 02:20:28 +01:00
String wordHash ;
Iterator hashi = query . queryHashes . iterator ( ) ;
while ( hashi . hasNext ( ) ) {
wordHash = ( String ) hashi . next ( ) ;
2006-09-11 12:39:25 +02:00
rcContainers . setWordHash ( wordHash ) ;
2006-09-14 02:51:02 +02:00
wordIndex . addEntries ( rcContainers , System . currentTimeMillis ( ) , true ) ;
log . logFine ( " FLUSHED " + wordHash + " : " + rcContainers . size ( ) + " url entries " ) ;
2006-01-31 02:20:28 +01:00
}
// the rcGlobal was flushed, empty it
2006-09-11 12:39:25 +02:00
count + = rcContainers . size ( ) ;
rcContainers . clear ( ) ;
2006-01-31 02:20:28 +01:00
}
}
2006-10-04 00:55:59 +02:00
rcContainerFlushCount + = count ;
2005-10-13 15:57:15 +02:00
}
2005-10-10 02:32:15 +02:00
}