2009-08-25 23:27:01 +02:00
// SearchEvent.java
// (C) 2005 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 10.10.2005 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
2009-09-05 22:41:21 +02:00
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
2009-08-25 23:27:01 +02:00
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.search ;
import java.util.ArrayList ;
2009-11-09 20:14:51 +01:00
import java.util.Iterator ;
import java.util.Map ;
2009-08-25 23:27:01 +02:00
2009-10-18 02:53:43 +02:00
import net.yacy.document.Condenser ;
2009-11-09 20:14:51 +01:00
import net.yacy.kelondro.data.meta.DigestURI ;
2009-10-11 02:12:19 +02:00
import net.yacy.kelondro.data.meta.URIMetadataRow ;
2009-11-09 20:14:51 +01:00
import net.yacy.kelondro.data.word.Word ;
2010-04-15 15:22:59 +02:00
import net.yacy.kelondro.index.HandleSet ;
import net.yacy.kelondro.index.RowSpaceExceededException ;
2009-10-10 01:13:30 +02:00
import net.yacy.kelondro.logging.Log ;
2009-12-08 15:25:51 +01:00
import net.yacy.kelondro.util.EventTracker ;
2009-10-10 03:14:19 +02:00
import net.yacy.kelondro.util.SortStack ;
import net.yacy.kelondro.util.SortStore ;
2010-03-20 11:28:03 +01:00
import net.yacy.repository.LoaderDispatcher ;
2009-10-10 01:13:30 +02:00
2009-08-27 16:34:41 +02:00
import de.anomic.search.MediaSnippet ;
2009-08-25 23:27:01 +02:00
import de.anomic.yacy.yacySeedDB ;
2009-10-20 00:34:44 +02:00
import de.anomic.yacy.graphics.ProfilingGraph ;
2009-08-25 23:27:01 +02:00
2009-08-27 17:19:48 +02:00
public class ResultFetcher {
2009-08-25 23:27:01 +02:00
// input values
2009-08-26 17:59:55 +02:00
final RankingProcess rankedCache ; // ordered search results, grows dynamically as all the query threads enrich this container
2009-08-30 12:28:23 +02:00
QueryParams query ;
2009-08-25 23:27:01 +02:00
private final yacySeedDB peers ;
// result values
2010-03-20 11:28:03 +01:00
protected final LoaderDispatcher loader ;
2009-08-27 16:34:41 +02:00
protected Worker [ ] workerThreads ;
protected final SortStore < ResultEntry > result ;
protected final SortStore < MediaSnippet > images ; // container to sort images by size
2010-04-20 15:45:22 +02:00
protected final HandleSet failedURLs ; // a set of urlhashes that could not been verified during search
2010-04-15 15:22:59 +02:00
protected final HandleSet snippetFetchWordHashes ; // a set of word hashes that are used to match with the snippets
2009-08-25 23:27:01 +02:00
long urlRetrievalAllTime ;
long snippetComputationAllTime ;
2009-09-18 11:19:52 +02:00
int taketimeout ;
2009-08-25 23:27:01 +02:00
2009-08-27 22:20:07 +02:00
public ResultFetcher (
2010-03-20 11:28:03 +01:00
final LoaderDispatcher loader ,
2009-08-25 23:27:01 +02:00
RankingProcess rankedCache ,
final QueryParams query ,
2009-09-18 11:19:52 +02:00
final yacySeedDB peers ,
final int taketimeout ) {
2009-08-25 23:27:01 +02:00
2010-03-20 11:28:03 +01:00
this . loader = loader ;
2009-08-25 23:27:01 +02:00
this . rankedCache = rankedCache ;
this . query = query ;
this . peers = peers ;
2009-09-18 11:19:52 +02:00
this . taketimeout = taketimeout ;
2009-08-25 23:27:01 +02:00
this . urlRetrievalAllTime = 0 ;
this . snippetComputationAllTime = 0 ;
2009-12-03 01:36:07 +01:00
this . result = new SortStore < ResultEntry > ( - 1 , true ) ; // this is the result, enriched with snippets, ranked and ordered by ranking
this . images = new SortStore < MediaSnippet > ( - 1 , true ) ;
2010-04-20 15:45:22 +02:00
this . failedURLs = new HandleSet ( URIMetadataRow . rowdef . primaryKeyLength , URIMetadataRow . rowdef . objectOrder , 0 ) ; // a set of url hashes where a worker thread tried to work on, but failed.
2009-08-25 23:27:01 +02:00
// snippets do not need to match with the complete query hashes,
// only with the query minus the stopwords which had not been used for the search
2010-04-15 15:22:59 +02:00
HandleSet filtered ;
try {
filtered = HandleSet . joinConstructive ( query . queryHashes , Switchboard . stopwordHashes ) ;
} catch ( RowSpaceExceededException e ) {
Log . logException ( e ) ;
filtered = new HandleSet ( query . queryHashes . row ( ) . primaryKeyLength , query . queryHashes . comparator ( ) , 0 ) ;
}
this . snippetFetchWordHashes = query . queryHashes . clone ( ) ;
2009-12-02 01:37:59 +01:00
if ( filtered ! = null & & ! filtered . isEmpty ( ) ) {
2010-04-15 15:22:59 +02:00
this . snippetFetchWordHashes . excludeDestructive ( Switchboard . stopwordHashes ) ;
2009-08-25 23:27:01 +02:00
}
// start worker threads to fetch urls and snippets
2009-08-30 12:28:23 +02:00
this . workerThreads = null ;
2009-11-20 15:35:33 +01:00
deployWorker ( Math . min ( 10 , query . itemsPerPage ) , query . neededResults ( ) ) ;
2009-12-08 15:25:51 +01:00
EventTracker . update ( " SEARCH " , new ProfilingGraph . searchEvent ( query . id ( true ) , this . workerThreads . length + " online snippet fetch threads started " , 0 , 0 ) , false , 30000 , ProfilingGraph . maxTime ) ;
2009-08-25 23:27:01 +02:00
}
2009-11-19 00:56:05 +01:00
public void deployWorker ( int deployCount , int neededResults ) {
2009-08-25 23:27:01 +02:00
if ( anyWorkerAlive ( ) ) return ;
2009-11-19 00:56:05 +01:00
this . workerThreads = new Worker [ ( query . onlineSnippetFetch ) ? deployCount : 1 ] ;
2009-08-30 12:28:23 +02:00
for ( int i = 0 ; i < workerThreads . length ; i + + ) {
this . workerThreads [ i ] = new Worker ( i , 10000 , ( query . onlineSnippetFetch ) ? 2 : 0 , neededResults ) ;
this . workerThreads [ i ] . start ( ) ;
2009-08-25 23:27:01 +02:00
}
}
2009-08-27 22:20:07 +02:00
boolean anyWorkerAlive ( ) {
2009-08-25 23:27:01 +02:00
if ( this . workerThreads = = null ) return false ;
for ( int i = 0 ; i < this . workerThreads . length ; i + + ) {
if ( ( this . workerThreads [ i ] ! = null ) & &
( this . workerThreads [ i ] . isAlive ( ) ) & &
( this . workerThreads [ i ] . busytime ( ) < 3000 ) ) return true ;
}
return false ;
}
/** @return accumulated time (ms) spent retrieving URL metadata entries */
public long getURLRetrievalTime() {
    return this.urlRetrievalAllTime;
}

/** @return accumulated time (ms) spent computing snippets */
public long getSnippetComputationTime() {
    return this.snippetComputationAllTime;
}
protected class Worker extends Thread {
private final long timeout ; // the date until this thread should try to work
private long lastLifeSign ; // when the last time the run()-loop was executed
private final int id ;
2010-01-11 00:09:48 +01:00
private final int snippetMode ;
private final int neededResults ;
2009-08-25 23:27:01 +02:00
2009-08-30 12:28:23 +02:00
public Worker ( final int id , final long maxlifetime , int snippetMode , int neededResults ) {
2009-08-25 23:27:01 +02:00
this . id = id ;
this . snippetMode = snippetMode ;
this . lastLifeSign = System . currentTimeMillis ( ) ;
this . timeout = System . currentTimeMillis ( ) + Math . max ( 1000 , maxlifetime ) ;
2009-08-30 12:28:23 +02:00
this . neededResults = neededResults ;
2009-08-25 23:27:01 +02:00
}
public void run ( ) {
// start fetching urls and snippets
2009-10-11 02:12:19 +02:00
URIMetadataRow page ;
2009-11-19 00:56:05 +01:00
//final int fetchAhead = snippetMode == 0 ? 0 : 10;
2009-08-25 23:27:01 +02:00
boolean nav_topics = query . navigators . equals ( " all " ) | | query . navigators . indexOf ( " topics " ) > = 0 ;
try {
while ( System . currentTimeMillis ( ) < this . timeout ) {
2009-11-19 00:56:05 +01:00
if ( result . size ( ) > neededResults ) break ;
2009-08-25 23:27:01 +02:00
this . lastLifeSign = System . currentTimeMillis ( ) ;
// check if we have enough
2010-01-07 13:41:43 +01:00
if ( ( query . contentdom = = ContentDomain . IMAGE ) & & ( images . size ( ) > = query . neededResults ( ) + 50 ) ) break ;
if ( ( query . contentdom ! = ContentDomain . IMAGE ) & & ( result . size ( ) > = query . neededResults ( ) + 10 ) ) break ;
2009-08-25 23:27:01 +02:00
// get next entry
2009-09-18 11:19:52 +02:00
page = rankedCache . takeURL ( true , taketimeout ) ;
2009-08-25 23:27:01 +02:00
if ( page = = null ) break ;
2010-04-20 15:45:22 +02:00
if ( failedURLs . has ( page . hash ( ) ) ) continue ;
2009-08-25 23:27:01 +02:00
2009-11-24 12:13:11 +01:00
final ResultEntry resultEntry = fetchSnippet ( page , snippetMode ) ; // does not fetch snippets if snippetMode == 0
2009-11-19 14:49:28 +01:00
2009-08-25 23:27:01 +02:00
if ( resultEntry = = null ) continue ; // the entry had some problems, cannot be used
2009-11-19 14:49:28 +01:00
if ( result . exists ( resultEntry ) ) continue ;
2009-08-25 23:27:01 +02:00
urlRetrievalAllTime + = resultEntry . dbRetrievalTime ;
snippetComputationAllTime + = resultEntry . snippetComputationTime ;
//System.out.println("+++DEBUG-resultWorker+++ fetched " + resultEntry.urlstring());
// place the result to the result vector
2009-11-19 14:49:28 +01:00
// apply post-ranking
2009-12-03 13:25:03 +01:00
long ranking = Long . valueOf ( rankedCache . getOrder ( ) . cardinal ( resultEntry . word ( ) ) ) ;
2009-11-19 14:49:28 +01:00
ranking + = postRanking ( resultEntry , rankedCache . getTopics ( ) ) ;
2009-11-20 04:30:48 +01:00
//System.out.println("*** resultEntry.hash = " + resultEntry.hash());
2009-11-19 14:49:28 +01:00
result . push ( resultEntry , ranking ) ;
if ( nav_topics ) rankedCache . addTopics ( resultEntry ) ;
2009-08-25 23:27:01 +02:00
//System.out.println("DEBUG SNIPPET_LOADING: thread " + id + " got " + resultEntry.url());
}
} catch ( final Exception e ) {
2009-11-05 21:28:37 +01:00
Log . logException ( e ) ;
2009-08-25 23:27:01 +02:00
}
Log . logInfo ( " SEARCH " , " resultWorker thread " + id + " terminated " ) ;
}
public long busytime ( ) {
return System . currentTimeMillis ( ) - this . lastLifeSign ;
}
}
2009-10-11 02:12:19 +02:00
protected ResultEntry fetchSnippet ( final URIMetadataRow page , final int snippetMode ) {
2009-08-27 22:20:07 +02:00
// Snippet Fetching can has 3 modes:
// 0 - do not fetch snippets
// 1 - fetch snippets offline only
// 2 - online snippet fetch
// load only urls if there was not yet a root url of that hash
// find the url entry
long startTime = System . currentTimeMillis ( ) ;
2009-10-11 02:12:19 +02:00
final URIMetadataRow . Components metadata = page . metadata ( ) ;
2010-01-29 16:59:24 +01:00
if ( metadata = = null ) return null ;
2009-08-27 22:20:07 +02:00
final long dbRetrievalTime = System . currentTimeMillis ( ) - startTime ;
if ( snippetMode = = 0 ) {
2009-11-24 12:13:11 +01:00
return new ResultEntry ( page , query . getSegment ( ) , peers , null , null , dbRetrievalTime , 0 ) ; // result without snippet
2009-08-27 22:20:07 +02:00
}
// load snippet
2009-11-19 00:56:05 +01:00
if ( query . contentdom = = ContentDomain . TEXT ) {
2009-08-27 22:20:07 +02:00
// attach text snippet
startTime = System . currentTimeMillis ( ) ;
2010-03-20 11:28:03 +01:00
final TextSnippet snippet = TextSnippet . retrieveTextSnippet (
this . loader ,
metadata ,
snippetFetchWordHashes ,
( snippetMode = = 2 ) ,
( ( query . constraint ! = null ) & & ( query . constraint . get ( Condenser . flag_cat_indexof ) ) ) ,
180 ,
( snippetMode = = 2 ) ? Integer . MAX_VALUE : 30000 ,
query . isGlobal ( ) ) ;
2009-08-27 22:20:07 +02:00
final long snippetComputationTime = System . currentTimeMillis ( ) - startTime ;
2010-04-20 15:45:22 +02:00
Log . logInfo ( " SEARCH " , " text snippet load time for " + metadata . url ( ) + " : " + snippetComputationTime + " , " + ( ( snippet . getErrorCode ( ) < 11 ) ? " snippet found " : ( " no snippet found ( " + snippet . getError ( ) + " ) " ) ) ) ;
2009-08-27 22:20:07 +02:00
if ( snippet . getErrorCode ( ) < 11 ) {
// we loaded the file and found the snippet
2009-11-24 12:13:11 +01:00
return new ResultEntry ( page , query . getSegment ( ) , peers , snippet , null , dbRetrievalTime , snippetComputationTime ) ; // result with snippet attached
2009-08-27 22:20:07 +02:00
} else if ( snippetMode = = 1 ) {
// we did not demand online loading, therefore a failure does not mean that the missing snippet causes a rejection of this result
// this may happen during a remote search, because snippet loading is omitted to retrieve results faster
2009-11-24 12:13:11 +01:00
return new ResultEntry ( page , query . getSegment ( ) , peers , null , null , dbRetrievalTime , snippetComputationTime ) ; // result without snippet
2009-08-27 22:20:07 +02:00
} else {
// problems with snippet fetch
2010-04-20 15:45:22 +02:00
registerFailure ( page . hash ( ) , " no text snippet for URL " + metadata . url ( ) ) ;
2009-08-27 22:20:07 +02:00
return null ;
}
} else {
// attach media information
startTime = System . currentTimeMillis ( ) ;
final ArrayList < MediaSnippet > mediaSnippets = MediaSnippet . retrieveMediaSnippets ( metadata . url ( ) , snippetFetchWordHashes , query . contentdom , ( snippetMode = = 2 ) , 6000 , query . isGlobal ( ) ) ;
final long snippetComputationTime = System . currentTimeMillis ( ) - startTime ;
2010-04-20 15:45:22 +02:00
Log . logInfo ( " SEARCH " , " media snippet load time for " + metadata . url ( ) + " : " + snippetComputationTime ) ;
2009-08-27 22:20:07 +02:00
2009-12-02 01:37:59 +01:00
if ( mediaSnippets ! = null & & ! mediaSnippets . isEmpty ( ) ) {
2009-08-27 22:20:07 +02:00
// found media snippets, return entry
2009-11-24 12:13:11 +01:00
return new ResultEntry ( page , query . getSegment ( ) , peers , null , mediaSnippets , dbRetrievalTime , snippetComputationTime ) ;
2009-08-27 22:20:07 +02:00
} else if ( snippetMode = = 1 ) {
2009-11-24 12:13:11 +01:00
return new ResultEntry ( page , query . getSegment ( ) , peers , null , null , dbRetrievalTime , snippetComputationTime ) ;
2009-08-27 22:20:07 +02:00
} else {
// problems with snippet fetch
2010-04-20 15:45:22 +02:00
registerFailure ( page . hash ( ) , " no media snippet for URL " + metadata . url ( ) ) ;
2009-08-27 22:20:07 +02:00
return null ;
}
}
// finished, no more actions possible here
}
2010-04-20 15:45:22 +02:00
private void registerFailure ( final byte [ ] urlhash , final String reason ) {
try {
this . failedURLs . put ( urlhash ) ;
} catch ( RowSpaceExceededException e ) {
Log . logException ( e ) ;
}
Log . logInfo ( " SEARCH " , " sorted out urlhash " + new String ( urlhash ) + " during search: " + reason ) ;
2009-08-25 23:27:01 +02:00
}
/** @return the number of result entries collected so far */
public int resultCount() {
    return this.result.size();
}
public ResultEntry oneResult ( final int item ) {
// check if we already retrieved this item
// (happens if a search pages is accessed a second time)
2009-12-08 15:25:51 +01:00
EventTracker . update ( " SEARCH " , new ProfilingGraph . searchEvent ( query . id ( true ) , " obtain one result entry - start " , 0 , 0 ) , false , 30000 , ProfilingGraph . maxTime ) ;
2009-08-25 23:27:01 +02:00
if ( this . result . sizeStore ( ) > item ) {
// we have the wanted result already in the result array .. return that
return this . result . element ( item ) . element ;
}
2009-08-30 13:03:34 +02:00
/ *
2009-08-30 12:28:23 +02:00
System . out . println ( " rankedCache.size() = " + this . rankedCache . size ( ) ) ;
System . out . println ( " result.size() = " + this . result . size ( ) ) ;
System . out . println ( " query.neededResults() = " + query . neededResults ( ) ) ;
2009-08-30 13:03:34 +02:00
* /
2009-08-30 12:28:23 +02:00
if ( ( ! anyWorkerAlive ( ) ) & &
2009-11-19 00:56:05 +01:00
( ( ( query . contentdom = = ContentDomain . IMAGE ) & & ( images . size ( ) + 30 < query . neededResults ( ) ) ) | |
2009-08-30 12:28:23 +02:00
( this . result . size ( ) < query . neededResults ( ) ) ) & &
//(event.query.onlineSnippetFetch) &&
( this . rankedCache . size ( ) > this . result . size ( ) )
) {
2009-09-07 21:22:12 +02:00
// start worker threads to fetch urls and snippets
2009-11-20 15:35:33 +01:00
deployWorker ( Math . min ( 10 , query . itemsPerPage ) , query . neededResults ( ) ) ;
2009-08-30 12:28:23 +02:00
}
2009-08-25 23:27:01 +02:00
// finally wait until enough results are there produced from the
// snippet fetch process
while ( ( anyWorkerAlive ( ) ) & & ( result . size ( ) < = item ) ) {
2010-04-14 01:29:55 +02:00
try { Thread . sleep ( ( item % query . itemsPerPage ) * 10L ) ; } catch ( final InterruptedException e ) { }
2009-08-25 23:27:01 +02:00
}
// finally, if there is something, return the result
if ( this . result . size ( ) < = item ) return null ;
return this . result . element ( item ) . element ;
}
private int resultCounter = 0 ;
public ResultEntry nextResult ( ) {
final ResultEntry re = oneResult ( resultCounter ) ;
resultCounter + + ;
return re ;
}
2009-08-27 16:34:41 +02:00
public MediaSnippet oneImage ( final int item ) {
2009-08-25 23:27:01 +02:00
// check if we already retrieved this item (happens if a search pages is accessed a second time)
if ( this . images . sizeStore ( ) > item ) {
// we have the wanted result already in the result array .. return that
return this . images . element ( item ) . element ;
}
2009-11-19 01:43:42 +01:00
// generate result object
final ResultEntry result = nextResult ( ) ;
MediaSnippet ms ;
if ( result ! = null ) {
// iterate over all images in the result
final ArrayList < MediaSnippet > imagemedia = result . mediaSnippets ( ) ;
if ( imagemedia ! = null ) {
for ( int j = 0 ; j < imagemedia . size ( ) ; j + + ) {
ms = imagemedia . get ( j ) ;
images . push ( ms , Long . valueOf ( ms . ranking ) ) ;
2009-11-20 13:11:56 +01:00
//System.out.println("*** image " + ms.href.hash() + " images.size = " + images.size() + "/" + images.size());
2009-08-25 23:27:01 +02:00
}
}
}
// now take the specific item from the image stack
if ( this . images . size ( ) < = item ) return null ;
return this . images . element ( item ) . element ;
}
public ArrayList < SortStack < ResultEntry > . stackElement > completeResults ( final long waitingtime ) {
final long timeout = System . currentTimeMillis ( ) + waitingtime ;
while ( ( result . size ( ) < query . neededResults ( ) ) & & ( anyWorkerAlive ( ) ) & & ( System . currentTimeMillis ( ) < timeout ) ) {
try { Thread . sleep ( 100 ) ; } catch ( final InterruptedException e ) { }
//System.out.println("+++DEBUG-completeResults+++ sleeping " + 200);
}
return this . result . list ( this . result . size ( ) ) ;
}
2009-11-09 20:14:51 +01:00
public long postRanking (
final ResultEntry rentry ,
2010-03-05 22:25:49 +01:00
final Map < String , Navigator . Item > topwords ) {
2009-11-09 20:14:51 +01:00
long r = 0 ;
// for media search: prefer pages with many links
2009-11-19 00:56:05 +01:00
if ( query . contentdom = = ContentDomain . IMAGE ) r + = rentry . limage ( ) < < query . ranking . coeff_cathasimage ;
if ( query . contentdom = = ContentDomain . AUDIO ) r + = rentry . laudio ( ) < < query . ranking . coeff_cathasaudio ;
if ( query . contentdom = = ContentDomain . VIDEO ) r + = rentry . lvideo ( ) < < query . ranking . coeff_cathasvideo ;
if ( query . contentdom = = ContentDomain . APP ) r + = rentry . lapp ( ) < < query . ranking . coeff_cathasapp ;
2009-11-09 20:14:51 +01:00
// prefer hit with 'prefer' pattern
2010-03-23 11:17:28 +01:00
if ( query . prefer . matcher ( rentry . url ( ) . toNormalform ( true , true ) ) . matches ( ) ) r + = 256 < < query . ranking . coeff_prefer ;
if ( query . prefer . matcher ( rentry . title ( ) ) . matches ( ) ) r + = 256 < < query . ranking . coeff_prefer ;
2009-11-09 20:14:51 +01:00
// apply 'common-sense' heuristic using references
final String urlstring = rentry . url ( ) . toNormalform ( true , true ) ;
final String [ ] urlcomps = DigestURI . urlComps ( urlstring ) ;
2009-12-10 15:40:32 +01:00
final String [ ] descrcomps = DigestURI . splitpattern . split ( rentry . title ( ) . toLowerCase ( ) ) ;
2010-03-05 22:25:49 +01:00
Navigator . Item tc ;
2009-11-09 20:14:51 +01:00
for ( int j = 0 ; j < urlcomps . length ; j + + ) {
tc = topwords . get ( urlcomps [ j ] ) ;
2010-03-05 22:25:49 +01:00
if ( tc ! = null ) r + = Math . max ( 1 , tc . count ) < < query . ranking . coeff_urlcompintoplist ;
2009-11-09 20:14:51 +01:00
}
for ( int j = 0 ; j < descrcomps . length ; j + + ) {
tc = topwords . get ( descrcomps [ j ] ) ;
2010-03-05 22:25:49 +01:00
if ( tc ! = null ) r + = Math . max ( 1 , tc . count ) < < query . ranking . coeff_descrcompintoplist ;
2009-11-09 20:14:51 +01:00
}
// apply query-in-result matching
2010-04-15 15:22:59 +02:00
final HandleSet urlcomph = Word . words2hashesHandles ( urlcomps ) ;
final HandleSet descrcomph = Word . words2hashesHandles ( descrcomps ) ;
2009-11-09 20:14:51 +01:00
final Iterator < byte [ ] > shi = query . queryHashes . iterator ( ) ;
byte [ ] queryhash ;
while ( shi . hasNext ( ) ) {
queryhash = shi . next ( ) ;
2010-04-15 15:22:59 +02:00
if ( urlcomph . has ( queryhash ) ) r + = 256 < < query . ranking . coeff_appurl ;
if ( descrcomph . has ( queryhash ) ) r + = 256 < < query . ranking . coeff_app_dc_title ;
2009-11-09 20:14:51 +01:00
}
return r ;
}
2009-08-25 23:27:01 +02:00
}