yacy_search_server/source/de/anomic/crawler/ResultURLs.java

// ResultURLs.java
// -----------------------
// part of YaCy
// (C) by Michael Peter Christen; mc@yacy.net
// first published on http://yacy.net
// Frankfurt, Germany, 2004
//
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

package de.anomic.crawler;

import java.net.MalformedURLException;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import net.yacy.cora.document.UTF8;
import net.yacy.cora.storage.ClusteredScoreMap;
import net.yacy.cora.storage.ScoreMap;
import net.yacy.kelondro.data.meta.DigestURI;
import net.yacy.kelondro.data.meta.URIMetadataRow;
import net.yacy.kelondro.logging.Log;
import net.yacy.kelondro.order.Bitfield;
import net.yacy.kelondro.util.ReverseMapIterator;
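
/**
 * Static registry of the URLs that arrived as load/index results, kept separately
 * for each {@link EventOrigin}. Every stack maps a url hash to the hashes of the
 * initiating and the executing peer; a score map per origin additionally counts
 * how often each host appears among the results.
 */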
public final class ResultURLs {

    public enum EventOrigin {

        // we must distinguish the following cases: resource-load was initiated by
        // 1) global crawling: the index is extern, not here (not possible here)
        // 2) result of search queries, some indexes are here (not possible here)
        // 3) result of index transfer, some of them are here (not possible here)
        // 4) proxy-load (initiator is "------------")
        // 5) local prefetch/crawling (initiator is own seedHash)
        // 6) local fetching for global crawling (other known or unknown initiator)
        UNKNOWN(0),
        REMOTE_RECEIPTS(1),
        QUERIES(2),
        DHT_TRANSFER(3),
        PROXY_LOAD(4),
        LOCAL_CRAWLING(5),
        GLOBAL_CRAWLING(6),
        SURROGATES(7);

        protected int code;

        private static final EventOrigin[] list = {
            UNKNOWN, REMOTE_RECEIPTS, QUERIES, DHT_TRANSFER, PROXY_LOAD, LOCAL_CRAWLING, GLOBAL_CRAWLING, SURROGATES};

        private EventOrigin(int code) {
            this.code = code;
        }

        public int getCode() {
            return this.code;
        }

        public static final EventOrigin getEvent(int key) {
            return list[key];
        }
    }

    private final static Map<EventOrigin, Map<String, InitExecEntry>> resultStacks = new ConcurrentHashMap<EventOrigin, Map<String, InitExecEntry>>(); // a mapping from urlHash to Entries
    private final static Map<EventOrigin, ScoreMap<String>> resultDomains = new ConcurrentHashMap<EventOrigin, ScoreMap<String>>();

    static {
        for (EventOrigin origin: EventOrigin.values()) {
            resultStacks.put(origin, new LinkedHashMap<String, InitExecEntry>());
            resultDomains.put(origin, new ClusteredScoreMap<String>());
        }
    }
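
    /**
     * Holder for the hashes of the peer that initiated and the peer that executed
     * the load of a result URL.
     */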
    public static class InitExecEntry {
        public byte[] initiatorHash, executorHash;
        public InitExecEntry(final byte[] initiatorHash, final byte[] executorHash) {
            this.initiatorHash = initiatorHash;
            this.executorHash = executorHash;
        }
    }
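
    /**
     * Put a loaded URL on the stack of the given origin and count its host in the
     * domain statistic of that origin.
     * @param e metadata of the loaded URL
     * @param initiatorHash hash of the initiating peer (null for proxy loads)
     * @param executorHash hash of the executing peer
     * @param stackType origin of the load event
     */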
    public static void stack(
            final URIMetadataRow e,
            final byte[] initiatorHash,
            final byte[] executorHash,
            final EventOrigin stackType) {
        // assert initiatorHash != null; // null == proxy !
        assert executorHash != null;
        if (e == null) { return; }
        try {
            final Map<String, InitExecEntry> resultStack = getStack(stackType);
            if (resultStack != null) {
                resultStack.put(UTF8.String(e.hash()), new InitExecEntry(initiatorHash, executorHash));
            }
        } catch (final Exception ex) {
            System.out.println("INTERNAL ERROR in ResultURLs.stack (url stack): " + ex.toString());
            return;
        }
        try {
            final ScoreMap<String> domains = getDomains(stackType);
            if (domains != null) {
                domains.inc(e.metadata().url().getHost());
            }
        } catch (final Exception ex) {
            System.out.println("INTERNAL ERROR in ResultURLs.stack (domain count): " + ex.toString());
            return;
        }
    }

    public static int getStackSize(final EventOrigin stack) {
        final Map<String, InitExecEntry> resultStack = getStack(stack);
        if (resultStack == null) return 0;
        return resultStack.size();
    }

    public static int getDomainListSize(final EventOrigin stack) {
        final ScoreMap<String> domains = getDomains(stack);
        if (domains == null) return 0;
        return domains.size();
    }
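
    /**
     * Iterate the entries of a result stack, newest entry first.
     * @param stack the origin stack
     * @return an iterator over url-hash/entry pairs in reverse insertion order
     */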
    public static Iterator<Map.Entry<String, InitExecEntry>> results(final EventOrigin stack) {
        final Map<String, InitExecEntry> resultStack = getStack(stack);
        if (resultStack == null) return new LinkedHashMap<String, InitExecEntry>().entrySet().iterator();
        return new ReverseMapIterator<String, InitExecEntry>(resultStack);
    }

    /**
     * iterate all domains in the result domain statistic
     * @param stack the origin stack
     * @return iterator of domains in reverse order (downwards)
     */
    public static Iterator<String> domains(final EventOrigin stack) {
        assert getDomains(stack) != null : "getDomains(" + stack + ") = null";
        return getDomains(stack).keys(false);
    }
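
    /**
     * Remove all result entries of the given host from the stack and delete the host
     * from the domain statistic.
     * @param stack the origin stack
     * @param host the host name to delete
     * @param hosthash the 6-character host hash (compared against the url hash from position 6 on)
     * @return the count that the host had in the domain statistic before deletion
     */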
    public static int deleteDomain(final EventOrigin stack, String host, String hosthash) {
        assert host != null : "host = null";
        assert hosthash.length() == 6;
        final Iterator<Map.Entry<String, InitExecEntry>> i = results(stack);
        Map.Entry<String, InitExecEntry> w;
        String urlhash;
        while (i.hasNext()) {
            w = i.next();
            urlhash = w.getKey();
            if (urlhash == null || urlhash.substring(6).equals(hosthash)) i.remove();
        }
        assert getDomains(stack) != null : "getDomains(" + stack + ") = null";
        return getDomains(stack).delete(host);
    }

    /**
     * return the count of the domain
     * @param stack the stack type
     * @param domain the domain name
     * @return the number of occurrences of the domain in the stack statistics
     */
    public static int domainCount(final EventOrigin stack, String domain) {
        assert domain != null : "domain = null";
        assert getDomains(stack) != null : "getDomains(" + stack + ") = null";
        return getDomains(stack).get(domain);
    }

    /**
     * returns the stack identified by the id <em>stack</em>
     *
     * @param stack id of the resultStack
     * @return the stack for the given origin, or null if no stack exists for that id
     */
    private static Map<String, InitExecEntry> getStack(final EventOrigin stack) {
        return resultStacks.get(stack);
    }

    private static ScoreMap<String> getDomains(final EventOrigin stack) {
        return resultDomains.get(stack);
    }
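
    /**
     * Clear the result stacks of all event origins. The domain statistics are not
     * cleared completely but only shrunk (see {@link #clearStack(EventOrigin)}).
     */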
    public static void clearStacks() {
        for (EventOrigin origin: EventOrigin.values()) clearStack(origin);
    }

    public static void clearStack(final EventOrigin stack) {
        final Map<String, InitExecEntry> resultStack = getStack(stack);
        if (resultStack != null) resultStack.clear();
        final ScoreMap<String> resultDomains = getDomains(stack);
        if (resultDomains != null) {
            // we do not clear this completely, just remove most of the less important entries
            resultDomains.shrinkToMaxSize(100);
            resultDomains.shrinkToMinScore(2);
        }
    }
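
    /**
     * Remove a url hash from the result stacks of all event origins.
     * @param urlHash the hash of the url to remove
     * @return false if urlHash is null, true otherwise
     */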
    public static boolean remove(final String urlHash) {
        if (urlHash == null) return false;
        Map<String, InitExecEntry> resultStack;
        for (EventOrigin origin: EventOrigin.values()) {
            resultStack = getStack(origin);
            if (resultStack != null) resultStack.remove(urlHash);
        }
        return true;
    }

    /**
     * test and benchmark
     * @param args
     */
    public static void main(final String[] args) {
        try {
            final DigestURI url = new DigestURI("http", "www.yacy.net", 80, "/");
            final URIMetadataRow urlRef = new URIMetadataRow(url, "YaCy Homepage", "", "", "", 0.0f, 0.0f, new Date(), new Date(), new Date(), "", new byte[] {}, 123, 42, '?', new Bitfield(), "de".getBytes(), 0, 0, 0, 0, 0, 0);
            EventOrigin stackNo = EventOrigin.LOCAL_CRAWLING;
            System.out.println("valid test:\n=======");
            // add
            stack(urlRef, urlRef.hash(), url.hash(), stackNo);
            // size
            System.out.println("size of stack:\t"+ getStackSize(stackNo));
        } catch (final MalformedURLException e) {
            Log.logException(e);
        }
    }
}