yacy_search_server/source/de/anomic/search/Switchboard.java


// plasmaSwitchboard.java
// (C) 2004-2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2004 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/*
This class holds the run-time environment of the plasma
Search Engine. Its data forms a blackboard which can be used
to organize running jobs around the indexing algorithm.
The blackboard consists of the following entities:
- storage: one plasmaStore object with the URL-based database
- configuration: initialized by properties once, then by external functions
- job queues: for parsing, condensing, indexing
- black/blue/whitelists: control input and output to the index
This class is also the core of the HTTP crawling.
There are some items that need to be respected when crawling the web:
1) respect robots.txt
2) do not access one domain too frequently, wait between accesses
3) remember crawled URLs and do not access them again too early
4) prioritization of specific links should be possible (hot-lists)
5) attributes for crawling (depth, filters, hot/black-lists, priority)
6) different crawling jobs with different attributes ('Orders') running simultaneously
We implement some specific tasks and use different databases to achieve these goals:
- a database 'crawlerDisallow.db' contains all URLs that shall not be crawled
- a database 'crawlerDomain.db' holds all domains and access times, where we loaded the disallow tables;
this table contains the following entities:
<flag: robots exist/not exist, last access of robots.txt, last access of domain (for access scheduling)>
- four databases for scheduled access: crawlerScheduledHotText.db, crawlerScheduledColdText.db,
crawlerScheduledHotMedia.db and crawlerScheduledColdMedia.db
- two stacks for new URLs: newText.stack and newMedia.stack
- two databases for URL double-check: knownText.db and knownMedia.db
- one database with crawling orders: crawlerOrders.db
The information flow of a single URL that is crawled is as follows:
- an HTML file is loaded from a specific URL within the module httpdProxyServlet as
a process of the proxy.
- the file is passed to httpdProxyCache. Here its processing is delayed until the proxy is idle.
- The cache entry is passed on to the plasmaSwitchboard. There the URL is stored into plasmaLURL where
the URL is stored under a specific hash. The URLs from the content are stripped off, stored in plasmaLURL
with a 'wrong' date (the dates of the URLs are not known at this time, only after fetching) and stacked with
plasmaCrawlerTextStack. The content is read and split into rated words in plasmaCondenser.
The split words are then integrated into the index with plasmaSearch.
- In plasmaSearch the words are indexed by reversing the relation between URL and words: one URL points
to many words, the words within the document at the URL. After reversing, one word points
to many URLs, all the URLs where the word occurs. One single word->URL-hash relation is stored in
plasmaIndexEntry. A set of plasmaIndexEntries is a reverse word index.
This reverse word index is stored temporarily in plasmaIndexCache.
- In plasmaIndexCache the single plasmaIndexEntry objects are collected and stored into a plasmaIndex entry.
These plasmaIndex objects are the true reverse word indexes.
- In plasmaIndex the plasmaIndexEntry objects are stored in a kelondroTree, an indexed file in the file system.
The information flow of a search request is as follows:
- in httpdFileServlet the user enters a search query, which is passed to plasmaSwitchboard
- in plasmaSwitchboard, the query is passed to plasmaSearch.
- in plasmaSearch, the plasmaSearch.result object is generated by simultaneous enumeration of
URL hashes in the reverse word indexes plasmaIndex
- (future: the plasmaSearch.result object is used to identify more key words for a new search)
*/
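/*
A minimal sketch of the index inversion described above, with illustrative names only
(this is not code from this class; java.util imports are assumed): the forward relation
URL -> words is turned into the reverse relation word -> URLs.

    // forward index: one URL hash maps to the word hashes of its document
    Map<String, Set<String>> forward = new HashMap<String, Set<String>>();
    // reverse index: one word hash maps to all URL hashes where the word occurs
    Map<String, Set<String>> reverse = new HashMap<String, Set<String>>();
    for (Map.Entry<String, Set<String>> doc : forward.entrySet()) {
        for (String wordHash : doc.getValue()) {
            Set<String> urls = reverse.get(wordHash);
            if (urls == null) {
                // create the posting list on first sight of the word
                urls = new HashSet<String>();
                reverse.put(wordHash, urls);
            }
            urls.add(doc.getKey()); // one word now points to many URL hashes
        }
    }
*/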
package de.anomic.search;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.security.NoSuchAlgorithmException;
import java.security.PublicKey;
import java.security.spec.InvalidKeySpecException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import java.util.regex.Pattern;
import net.yacy.document.Condenser;
import net.yacy.document.Document;
import net.yacy.document.TextParser;
import net.yacy.document.ParserException;
import net.yacy.document.content.DCEntry;
import net.yacy.document.content.RSSMessage;
import net.yacy.document.content.SurrogateReader;
import net.yacy.document.parser.html.ImageEntry;
import net.yacy.document.parser.xml.RSSFeed;
import net.yacy.kelondro.data.meta.DigestURI;
import net.yacy.kelondro.data.meta.URIMetadataRow;
import net.yacy.kelondro.data.word.Word;
import net.yacy.kelondro.logging.Log;
import net.yacy.kelondro.order.Base64Order;
import net.yacy.kelondro.order.Digest;
import net.yacy.kelondro.order.NaturalOrder;
import net.yacy.kelondro.util.DateFormatter;
import net.yacy.kelondro.util.Domains;
import net.yacy.kelondro.util.FileUtils;
import net.yacy.kelondro.util.MemoryControl;
import net.yacy.kelondro.util.MemoryTracker;
import net.yacy.kelondro.util.SetTools;
import net.yacy.kelondro.util.OS;
import net.yacy.kelondro.workflow.BusyThread;
import net.yacy.kelondro.workflow.InstantBusyThread;
import net.yacy.kelondro.workflow.WorkflowJob;
import net.yacy.kelondro.workflow.WorkflowProcessor;
import net.yacy.kelondro.workflow.WorkflowThread;
import net.yacy.repository.Blacklist;
import net.yacy.repository.LoaderDispatcher;
import de.anomic.crawler.CrawlProfile;
import de.anomic.crawler.CrawlQueues;
import de.anomic.crawler.CrawlStacker;
import de.anomic.crawler.CrawlSwitchboard;
import de.anomic.crawler.ImporterManager;
import de.anomic.crawler.NoticedURL;
import de.anomic.crawler.ResourceObserver;
import de.anomic.crawler.ResultImages;
import de.anomic.crawler.ResultURLs;
import de.anomic.crawler.RobotsTxt;
import de.anomic.crawler.CrawlProfile.entry;
import de.anomic.crawler.retrieval.EventOrigin;
import de.anomic.crawler.retrieval.HTTPLoader;
import de.anomic.crawler.retrieval.Request;
import de.anomic.crawler.retrieval.Response;
import de.anomic.data.LibraryProvider;
import de.anomic.data.URLLicense;
import de.anomic.data.blogBoard;
import de.anomic.data.blogBoardComments;
import de.anomic.data.bookmarksDB;
import de.anomic.data.listManager;
import de.anomic.data.messageBoard;
import de.anomic.data.userDB;
import de.anomic.data.wiki.wikiBoard;
import de.anomic.data.wiki.wikiCode;
import de.anomic.data.wiki.wikiParser;
import de.anomic.http.client.Client;
import de.anomic.http.client.RemoteProxyConfig;
import de.anomic.http.client.Cache;
import de.anomic.http.server.HTTPDemon;
import de.anomic.http.server.HeaderFramework;
import de.anomic.http.server.RequestHeader;
import de.anomic.http.server.ResponseHeader;
import de.anomic.http.server.RobotsTxtConfig;
import de.anomic.net.UPnP;
import de.anomic.search.blockrank.CRDistribution;
import de.anomic.server.serverSwitch;
import de.anomic.server.serverCore;
import de.anomic.tools.crypt;
import de.anomic.tools.CryptoLib;
import de.anomic.yacy.yacyBuildProperties;
import de.anomic.yacy.yacyClient;
import de.anomic.yacy.yacyCore;
import de.anomic.yacy.yacyNewsPool;
import de.anomic.yacy.yacyNewsRecord;
import de.anomic.yacy.yacySeed;
import de.anomic.yacy.Tray;
import de.anomic.yacy.yacySeedDB;
import de.anomic.yacy.yacyUpdateLocation;
import de.anomic.yacy.yacyRelease;
import de.anomic.yacy.dht.Dispatcher;
import de.anomic.yacy.dht.PeerSelection;
import de.anomic.yacy.graphics.WebStructureGraph;
public final class Switchboard extends serverSwitch {
// load slots
public static int xstackCrawlSlots = 2000;
private int dhtMaxContainerCount = 500;
private int dhtMaxReferenceCount = 1000;
public static long lastPPMUpdate = System.currentTimeMillis() - 30000;
// colored list management
public static TreeSet<String> badwords = new TreeSet<String>(NaturalOrder.naturalComparator);
public static TreeSet<String> stopwords = new TreeSet<String>(NaturalOrder.naturalComparator);
public static TreeSet<String> blueList = null;
public static TreeSet<byte[]> badwordHashes = null;
public static TreeSet<byte[]> blueListHashes = null;
public static TreeSet<byte[]> stopwordHashes = null;
public static Blacklist urlBlacklist = null;
public static wikiParser wikiParser = null;
// storage management
public File htCachePath;
public File dictionariesPath;
public File listsPath;
public File htDocsPath;
public File rankingPath;
public File workPath;
public File releasePath;
public File networkRoot;
public File queuesRoot;
public File surrogatesInPath;
public File surrogatesOutPath;
public Map<String, String> rankingPermissions;
public Segments indexSegments;
public LoaderDispatcher loader;
public CrawlSwitchboard crawler;
public CrawlQueues crawlQueues;
public ResultURLs crawlResults;
public CrawlStacker crawlStacker;
public messageBoard messageDB;
public wikiBoard wikiDB;
public blogBoard blogDB;
public blogBoardComments blogCommentDB;
public RobotsTxt robots;
public boolean rankingOn;
public CRDistribution rankingOwnDistribution;
public CRDistribution rankingOtherDistribution;
public HashMap<String, Object[]> outgoingCookies, incomingCookies;
public volatile long proxyLastAccess, localSearchLastAccess, remoteSearchLastAccess;
public yacyCore yc;
public ResourceObserver observer;
public userDB userDB;
public bookmarksDB bookmarksDB;
public WebStructureGraph webStructure;
public ImporterManager dbImportManager;
public ArrayList<QueryParams> localSearches; // array of search result properties as HashMaps
public ArrayList<QueryParams> remoteSearches; // array of search result properties as HashMaps
public ConcurrentHashMap<String, TreeSet<Long>> localSearchTracker, remoteSearchTracker; // mappings from requesting host to a TreeSet of Long(access time)
public long indexedPages = 0;
public double requestedQueries = 0d;
public double totalQPM = 0d;
public TreeMap<byte[], String> clusterhashes; // map of peer hash (byte[]) to an alternative local address as ip:port or only ip (String), or null if the address in the seed should be used
public URLLicense licensedURLs;
public List<Pattern> networkWhitelist, networkBlacklist;
public Dispatcher dhtDispatcher;
public List<String> trail;
public yacySeedDB peers;
public WorkflowProcessor<indexingQueueEntry> indexingDocumentProcessor;
public WorkflowProcessor<indexingQueueEntry> indexingCondensementProcessor;
public WorkflowProcessor<indexingQueueEntry> indexingAnalysisProcessor;
public WorkflowProcessor<indexingQueueEntry> indexingStorageProcessor;
public RobotsTxtConfig robotstxtConfig = null;
public boolean useTailCache;
public boolean exceed134217727;
private final Semaphore shutdownSync = new Semaphore(0);
private boolean terminate = false;
//private Object crawlingPausedSync = new Object();
//private boolean crawlingIsPaused = false;
public Hashtable<String, Object[]> crawlJobsStatus = new Hashtable<String, Object[]>();
private static Switchboard sb = null;
public Switchboard(final File rootPath, final String initPath, final String configPath, final boolean applyPro) throws IOException {
super(rootPath, initPath, configPath, applyPro);
MemoryTracker.startSystemProfiling();
sb = this;
// set loglevel and log
setLog(new Log("PLASMA"));
if (applyPro) this.log.logInfo("This is the pro-version of YaCy");
// UPnP port mapping
if (getConfigBool(SwitchboardConstants.UPNP_ENABLED, false))
InstantBusyThread.oneTimeJob(UPnP.class, "addPortMapping", UPnP.log, 0);
// init TrayIcon if possible
Tray.init(this);
// remote proxy configuration
RemoteProxyConfig.init(this);
// memory configuration
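// 134217727 = 2^27 - 1; judging from the flag name, exceed134217727 permits index structures
// to grow beyond this size when enough memory is available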
this.useTailCache = getConfigBool("ramcopy", true);
if (MemoryControl.available() > 1024L * 1024L * 1024L) this.useTailCache = true; // > 1 GiB free
this.exceed134217727 = getConfigBool("exceed134217727", true);
if (MemoryControl.available() > 2L * 1024L * 1024L * 1024L) this.exceed134217727 = true; // > 2 GiB free; long arithmetic, the int product 1024 * 1024 * 1024 * 2 would overflow
// load values from configs
final File indexPath = getConfigPath(SwitchboardConstants.INDEX_PRIMARY_PATH, SwitchboardConstants.INDEX_PATH_DEFAULT);
this.log.logConfig("Index Primary Path: " + indexPath.toString());
this.listsPath = getConfigPath(SwitchboardConstants.LISTS_PATH, SwitchboardConstants.LISTS_PATH_DEFAULT);
this.log.logConfig("Lists Path: " + this.listsPath.toString());
this.htDocsPath = getConfigPath(SwitchboardConstants.HTDOCS_PATH, SwitchboardConstants.HTDOCS_PATH_DEFAULT);
this.log.logConfig("HTDOCS Path: " + this.htDocsPath.toString());
this.rankingPath = getConfigPath(SwitchboardConstants.RANKING_PATH, SwitchboardConstants.RANKING_PATH_DEFAULT);
this.log.logConfig("Ranking Path: " + this.rankingPath.toString());
this.rankingPermissions = new HashMap<String, String>(); // mapping of permission - to filename.
this.workPath = getConfigPath(SwitchboardConstants.WORK_PATH, SwitchboardConstants.WORK_PATH_DEFAULT);
this.log.logConfig("Work Path: " + this.workPath.toString());
this.dictionariesPath = getConfigPath(SwitchboardConstants.DICTIONARY_SOURCE_PATH, SwitchboardConstants.DICTIONARY_SOURCE_PATH_DEFAULT);
this.log.logConfig("Dictionaries Path:" + this.dictionariesPath.toString());
// init libraries
this.log.logConfig("initializing libraries");
LibraryProvider.initialize(this.dictionariesPath);
// set a high maximum cache size to current size; this is adopted later automatically
final int wordCacheMaxCount = (int) getConfigLong(SwitchboardConstants.WORDCACHE_MAX_COUNT, 20000);
setConfig(SwitchboardConstants.WORDCACHE_MAX_COUNT, Integer.toString(wordCacheMaxCount));
// set network-specific performance attributes
if (this.firstInit) {
setRemotecrawlPPM(Math.max(1, (int) getConfigLong("network.unit.remotecrawl.speed", 60)));
}
// load the network definition
overwriteNetworkDefinition();
// start indexing management
log.logConfig("Starting Indexing Management");
final String networkName = getConfig(SwitchboardConstants.NETWORK_NAME, "");
final long fileSizeMax = (OS.isWindows) ? sb.getConfigLong("filesize.max.win", (long) Integer.MAX_VALUE) : sb.getConfigLong("filesize.max.other", (long) Integer.MAX_VALUE);
final int redundancy = (int) sb.getConfigLong("network.unit.dhtredundancy.senior", 1);
final int partitionExponent = (int) sb.getConfigLong("network.unit.dht.partitionExponent", 0);
this.networkRoot = new File(new File(indexPath, networkName), "NETWORK");
this.queuesRoot = new File(new File(indexPath, networkName), "QUEUES");
this.networkRoot.mkdirs();
this.queuesRoot.mkdirs();
final File mySeedFile = new File(networkRoot, yacySeedDB.DBFILE_OWN_SEED);
peers = new yacySeedDB(
networkRoot,
"seed.new.heap",
"seed.old.heap",
"seed.pot.heap",
mySeedFile,
redundancy,
partitionExponent,
this.useTailCache,
this.exceed134217727);
File oldSingleSegment = new File(new File(indexPath, networkName), "TEXT");
File newSegmentsPath = new File(new File(indexPath, networkName), "SEGMENTS");
Segments.migrateOld(oldSingleSegment, newSegmentsPath, getConfig(SwitchboardConstants.SEGMENT_PUBLIC, "default"));
indexSegments = new Segments(
log,
newSegmentsPath,
wordCacheMaxCount,
fileSizeMax,
this.useTailCache,
this.exceed134217727);
crawler = new CrawlSwitchboard(
peers,
networkName,
log,
this.queuesRoot);
// set the default segment names
indexSegments.setSegment(Segments.Process.RECEIPTS, getConfig(SwitchboardConstants.SEGMENT_RECEIPTS, "default"));
indexSegments.setSegment(Segments.Process.QUERIES, getConfig(SwitchboardConstants.SEGMENT_QUERIES, "default"));
indexSegments.setSegment(Segments.Process.DHTIN, getConfig(SwitchboardConstants.SEGMENT_DHTIN, "default"));
indexSegments.setSegment(Segments.Process.DHTOUT, getConfig(SwitchboardConstants.SEGMENT_DHTOUT, "default"));
indexSegments.setSegment(Segments.Process.PROXY, getConfig(SwitchboardConstants.SEGMENT_PROXY, "default"));
indexSegments.setSegment(Segments.Process.LOCALCRAWLING, getConfig(SwitchboardConstants.SEGMENT_LOCALCRAWLING, "default"));
indexSegments.setSegment(Segments.Process.REMOTECRAWLING, getConfig(SwitchboardConstants.SEGMENT_REMOTECRAWLING, "default"));
indexSegments.setSegment(Segments.Process.PUBLIC, getConfig(SwitchboardConstants.SEGMENT_PUBLIC, "default"));
// init crawl results monitor cache
crawlResults = new ResultURLs();
// start yacy core
log.logConfig("Starting YaCy Protocol Core");
this.yc = new yacyCore(this);
InstantBusyThread.oneTimeJob(this, "loadSeedLists", yacyCore.log, 0);
//final long startedSeedListAquisition = System.currentTimeMillis();
// init a DHT transmission dispatcher
this.dhtDispatcher = new Dispatcher(
indexSegments.segment(Segments.Process.LOCALCRAWLING),
peers,
true,
30000);
// set up local robots.txt
this.robotstxtConfig = RobotsTxtConfig.init(this);
// setting timestamp of last proxy access
this.proxyLastAccess = System.currentTimeMillis() - 10000;
this.localSearchLastAccess = System.currentTimeMillis() - 10000;
this.remoteSearchLastAccess = System.currentTimeMillis() - 10000;
this.webStructure = new WebStructureGraph(log, rankingPath, "LOCAL/010_cr/", getConfig("CRDist0Path", CRDistribution.CR_OWN), new File(queuesRoot, "webStructure.map"));
// configuring list path
if (!(listsPath.exists())) listsPath.mkdirs();
// load coloured lists
if (blueList == null) {
// read only once upon first instantiation of this class
final String f = getConfig(SwitchboardConstants.LIST_BLUE, SwitchboardConstants.LIST_BLUE_DEFAULT);
final File plasmaBlueListFile = new File(f);
if (f != null) blueList = SetTools.loadList(plasmaBlueListFile, NaturalOrder.naturalComparator); else blueList = new TreeSet<String>();
blueListHashes = Word.words2hashes(blueList);
this.log.logConfig("loaded blue-list from file " + plasmaBlueListFile.getName() + ", " +
blueList.size() + " entries, " +
ppRamString(plasmaBlueListFile.length()/1024));
}
// load blacklist
this.log.logConfig("Loading blacklist ...");
final File blacklistsPath = getConfigPath(SwitchboardConstants.LISTS_PATH, SwitchboardConstants.LISTS_PATH_DEFAULT);
urlBlacklist = new Blacklist(blacklistsPath);
listManager.switchboard = this;
listManager.listsPath = blacklistsPath;
listManager.reloadBlacklists();
// load badwords (to filter the topwords)
if (badwords == null || badwords.size() == 0) {
final File badwordsFile = new File(rootPath, SwitchboardConstants.LIST_BADWORDS_DEFAULT);
badwords = SetTools.loadList(badwordsFile, NaturalOrder.naturalComparator);
badwordHashes = Word.words2hashes(badwords);
this.log.logConfig("loaded badwords from file " + badwordsFile.getName() +
", " + badwords.size() + " entries, " +
ppRamString(badwordsFile.length()/1024));
}
// load stopwords
if (stopwords == null || stopwords.size() == 0) {
final File stopwordsFile = new File(rootPath, SwitchboardConstants.LIST_STOPWORDS_DEFAULT);
stopwords = SetTools.loadList(stopwordsFile, NaturalOrder.naturalComparator);
stopwordHashes = Word.words2hashes(stopwords);
this.log.logConfig("loaded stopwords from file " + stopwordsFile.getName() + ", " +
stopwords.size() + " entries, " +
ppRamString(stopwordsFile.length()/1024));
}
// load ranking tables
final File YBRPath = new File(rootPath, "ranking/YBR");
if (YBRPath.exists()) {
RankingProcess.loadYBR(YBRPath, 15);
}
// loading the robots.txt db
this.log.logConfig("Initializing robots.txt DB");
final File robotsDBFile = new File(queuesRoot, "crawlRobotsTxt.heap");
robots = new RobotsTxt(robotsDBFile);
this.log.logConfig("Loaded robots.txt DB from file " + robotsDBFile.getName() +
", " + robots.size() + " entries" +
", " + ppRamString(robotsDBFile.length()/1024));
// start a cache manager
log.logConfig("Starting HT Cache Manager");
// create the cache directory
htCachePath = getConfigPath(SwitchboardConstants.HTCACHE_PATH, SwitchboardConstants.HTCACHE_PATH_DEFAULT);
this.log.logInfo("HTCACHE Path = " + htCachePath.getAbsolutePath());
final long maxCacheSize = 1024 * 1024 * Long.parseLong(getConfig(SwitchboardConstants.PROXY_CACHE_SIZE, "2")); // this is megabyte
Cache.init(htCachePath, peers.mySeed().hash, maxCacheSize);
// create the surrogates directories
surrogatesInPath = getConfigPath(SwitchboardConstants.SURROGATES_IN_PATH, SwitchboardConstants.SURROGATES_IN_PATH_DEFAULT);
this.log.logInfo("surrogates.in Path = " + surrogatesInPath.getAbsolutePath());
surrogatesInPath.mkdirs();
surrogatesOutPath = getConfigPath(SwitchboardConstants.SURROGATES_OUT_PATH, SwitchboardConstants.SURROGATES_OUT_PATH_DEFAULT);
this.log.logInfo("surrogates.out Path = " + surrogatesOutPath.getAbsolutePath());
surrogatesOutPath.mkdirs();
// create the release download directory
releasePath = getConfigPath(SwitchboardConstants.RELEASE_PATH, SwitchboardConstants.RELEASE_PATH_DEFAULT);
releasePath.mkdirs();
this.log.logInfo("RELEASE Path = " + releasePath.getAbsolutePath());
// starting message board
initMessages();
// starting wiki
initWiki();
//starting blog
initBlog();
// Init User DB
this.log.logConfig("Loading User DB");
final File userDbFile = new File(getRootPath(), "DATA/SETTINGS/user.heap");
this.userDB = new userDB(userDbFile);
this.log.logConfig("Loaded User DB from file " + userDbFile.getName() +
", " + this.userDB.size() + " entries" +
", " + ppRamString(userDbFile.length()/1024));
//Init bookmarks DB
initBookmarks();
// define a realtime parsable mimetype list
log.logConfig("Parser: Initializing Mime Type deny list");
TextParser.setDenyMime(getConfig(SwitchboardConstants.PARSER_MIME_DENY, null));
// start a loader
log.logConfig("Starting Crawl Loader");
this.loader = new LoaderDispatcher(this);
this.crawlQueues = new CrawlQueues(this, queuesRoot);
this.crawlQueues.noticeURL.setMinimumDelta(
this.getConfigLong("minimumLocalDelta", this.crawlQueues.noticeURL.getMinimumLocalDelta()),
this.getConfigLong("minimumGlobalDelta", this.crawlQueues.noticeURL.getMinimumGlobalDelta()));
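// the minimum deltas are the politeness pauses in milliseconds between two accesses to the same
// host (cf. crawling rule 2 in the file header); presumably 'local' applies to the peer's own
// crawl jobs and 'global' to remote-triggered ones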
/*
* Creating sync objects and loading status for the crawl jobs
* a) local crawl
* b) remote triggered crawl
* c) global crawl trigger
*/
this.crawlJobsStatus.put(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL, new Object[]{
new Object(),
Boolean.valueOf(getConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL + "_isPaused", "false"))});
this.crawlJobsStatus.put(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL, new Object[]{
new Object(),
Boolean.valueOf(getConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL + "_isPaused", "false"))});
this.crawlJobsStatus.put(SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER, new Object[]{
new Object(),
Boolean.valueOf(getConfig(SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER + "_isPaused", "false"))});
// init cookie-Monitor
this.log.logConfig("Starting Cookie Monitor");
this.outgoingCookies = new HashMap<String, Object[]>();
this.incomingCookies = new HashMap<String, Object[]>();
// init search history trackers
this.localSearchTracker = new ConcurrentHashMap<String, TreeSet<Long>>(); // String:TreeSet - IP:set of Long(accessTime)
this.remoteSearchTracker = new ConcurrentHashMap<String, TreeSet<Long>>();
this.localSearches = new ArrayList<QueryParams>(); // contains search result properties as HashMaps
this.remoteSearches = new ArrayList<QueryParams>();
// init messages: clean up message symbol
final File notifierSource = new File(getRootPath(), getConfig(SwitchboardConstants.HTROOT_PATH, SwitchboardConstants.HTROOT_PATH_DEFAULT) + "/env/grafics/empty.gif");
final File notifierDest = new File(getConfigPath(SwitchboardConstants.HTDOCS_PATH, SwitchboardConstants.HTDOCS_PATH_DEFAULT), "notifier.gif");
try {
FileUtils.copy(notifierSource, notifierDest);
} catch (final IOException e) {
}
// init ranking transmission
/*
CRDistOn = true/false
CRDist0Path = GLOBAL/010_owncr
CRDist0Method = 1
CRDist0Percent = 0
CRDist0Target =
CRDist1Path = GLOBAL/014_othercr/1
CRDist1Method = 9
CRDist1Percent = 30
CRDist1Target = kaskelix.de:8080,yacy.dyndns.org:8000,suma-lab.de:8080
**/
rankingOn = getConfig(SwitchboardConstants.RANKING_DIST_ON, "true").equals("true") && networkName.equals("freeworld");
rankingOwnDistribution = new CRDistribution(log, peers,
        new File(rankingPath, getConfig(SwitchboardConstants.RANKING_DIST_0_PATH, CRDistribution.CR_OWN)),
        (int) getConfigLong(SwitchboardConstants.RANKING_DIST_0_METHOD, CRDistribution.METHOD_ANYSENIOR),
        (int) getConfigLong(SwitchboardConstants.RANKING_DIST_0_METHOD, 0), // note: reads the METHOD key again; judging from the CRDist0Percent example above, a percent key was probably intended here
        getConfig(SwitchboardConstants.RANKING_DIST_0_TARGET, ""));
rankingOtherDistribution = new CRDistribution(log, peers,
        new File(rankingPath, getConfig(SwitchboardConstants.RANKING_DIST_1_PATH, CRDistribution.CR_OTHER)),
        (int) getConfigLong(SwitchboardConstants.RANKING_DIST_1_METHOD, CRDistribution.METHOD_MIXEDSENIOR),
        (int) getConfigLong(SwitchboardConstants.RANKING_DIST_1_METHOD, 30), // same probable slip as above
        getConfig(SwitchboardConstants.RANKING_DIST_1_TARGET, "kaskelix.de:8080,yacy.dyndns.org:8000"));
// init nameCacheNoCachingList
Domains.setNoCachingPatterns(getConfig(SwitchboardConstants.HTTPC_NAME_CACHE_CACHING_PATTERNS_NO,""));
// generate snippets cache
log.logConfig("Initializing Snippet Cache");
TextSnippet.init(log, this);
// init the wiki
wikiParser = new wikiCode(this.peers.mySeed().getClusterAddress());
// initializing the resourceObserver
InstantBusyThread.oneTimeJob(ResourceObserver.class, "initThread", ResourceObserver.log, 0);
// initializing the stackCrawlThread
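// the last two constructor arguments decide whether local and/or global URLs are accepted:
// the indexOf trick below yields true when network.unit.domain is set to 'local'/'global' or to 'any'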
this.crawlStacker = new CrawlStacker(
this.crawlQueues,
this.crawler,
this.indexSegments.segment(Segments.Process.LOCALCRAWLING),
this.peers,
"local.any".indexOf(getConfig("network.unit.domain", "global")) >= 0,
"global.any".indexOf(getConfig("network.unit.domain", "global")) >= 0);
// initializing dht chunk generation
this.dhtMaxReferenceCount = (int) getConfigLong(SwitchboardConstants.INDEX_DIST_CHUNK_SIZE_START, 50);
// init robinson cluster
// before we do that, we wait some time until the seed list is loaded.
this.clusterhashes = this.peers.clusterHashes(getConfig("cluster.peers.yacydomain", ""));
// deploy blocking threads
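// the four workflow processors below form the indexing pipeline; they are constructed in
// reverse order because each processor is handed its successor as output queue:
// parseDocument -> condenseDocument -> webStructureAnalysis -> storeDocumentIndex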
int indexerThreads = Math.max(1, WorkflowProcessor.useCPU / 2);
this.indexingStorageProcessor = new WorkflowProcessor<indexingQueueEntry>(
"storeDocumentIndex",
"This is the sequencing step of the indexing queue: no concurrency is wanted here, because the access of the indexer works better if it is not concurrent. Files are written as streams, concurrency would destroy IO performance. In this process the words are written to the RWI cache, which flushes if it is full.",
new String[]{"RWI/Cache/Collections"},
this, "storeDocumentIndex", WorkflowProcessor.useCPU + 40, null, indexerThreads);
this.indexingAnalysisProcessor = new WorkflowProcessor<indexingQueueEntry>(
"webStructureAnalysis",
"This just stores the link structure of the document into a web structure database.",
new String[]{"storeDocumentIndex"},
this, "webStructureAnalysis", WorkflowProcessor.useCPU + 20, indexingStorageProcessor, WorkflowProcessor.useCPU + 1);
this.indexingCondensementProcessor = new WorkflowProcessor<indexingQueueEntry>(
"condenseDocument",
"This does a structural analysis of plain texts: markup of headlines, slicing into phrases (i.e. sentences), markup with position, counting of words, calculation of term frequency.",
new String[]{"webStructureAnalysis"},
this, "condenseDocument", WorkflowProcessor.useCPU + 10, indexingAnalysisProcessor, WorkflowProcessor.useCPU + 1);
this.indexingDocumentProcessor = new WorkflowProcessor<indexingQueueEntry>(
"parseDocument",
"This does the parsing of the newly loaded documents from the web. The result is not only a plain text document, but also a list of URLs that are embedded into the document. The URLs are handed over to the CrawlStacker. This process has two child process queues!",
new String[]{"condenseDocument", "CrawlStacker"},
this, "parseDocument", 2 * WorkflowProcessor.useCPU + 1, indexingCondensementProcessor, 2 * WorkflowProcessor.useCPU + 1);
// deploy busy threads
log.logConfig("Starting Threads");
MemoryControl.gc(10000, "plasmaSwitchboard, help for profiler"); // help for profiler - thq
deployThread(SwitchboardConstants.CLEANUP, "Cleanup", "simple cleaning process for monitoring information", null,
new InstantBusyThread(this, SwitchboardConstants.CLEANUP_METHOD_START, SwitchboardConstants.CLEANUP_METHOD_JOBCOUNT, SwitchboardConstants.CLEANUP_METHOD_FREEMEM), 600000); // every 5 minutes, wait 10 minutes until first run
deployThread(SwitchboardConstants.SURROGATES, "Surrogates", "A thread that polls the SURROGATES path and puts all documents in one surrogate file into the indexing queue.", null,
new InstantBusyThread(this, SwitchboardConstants.SURROGATES_METHOD_START, SwitchboardConstants.SURROGATES_METHOD_JOBCOUNT, SwitchboardConstants.SURROGATES_METHOD_FREEMEM), 10000);
deployThread(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL, "Remote Crawl Job", "thread that performs a single crawl/indexing step triggered by a remote peer", null,
new InstantBusyThread(crawlQueues, SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL_METHOD_START, SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL_METHOD_JOBCOUNT, SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL_METHOD_FREEMEM), 30000);
deployThread(SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER, "Remote Crawl URL Loader", "thread that loads remote crawl lists from other peers", "",
new InstantBusyThread(crawlQueues, SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER_METHOD_START, SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER_METHOD_JOBCOUNT, SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER_METHOD_FREEMEM), 30000); // error here?
deployThread(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL, "Local Crawl", "thread that performs a single crawl step from the local crawl queue", "/IndexCreateWWWLocalQueue_p.html",
new InstantBusyThread(crawlQueues, SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_METHOD_START, SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_METHOD_JOBCOUNT, SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_METHOD_FREEMEM), 10000);
deployThread(SwitchboardConstants.SEED_UPLOAD, "Seed-List Upload", "task that a principal peer performs to generate and upload a seed-list to an FTP account", null,
new InstantBusyThread(yc, SwitchboardConstants.SEED_UPLOAD_METHOD_START, SwitchboardConstants.SEED_UPLOAD_METHOD_JOBCOUNT, SwitchboardConstants.SEED_UPLOAD_METHOD_FREEMEM), 180000);
deployThread(SwitchboardConstants.PEER_PING, "YaCy Core", "this is the p2p-control and peer-ping task", null,
new InstantBusyThread(yc, SwitchboardConstants.PEER_PING_METHOD_START, SwitchboardConstants.PEER_PING_METHOD_JOBCOUNT, SwitchboardConstants.PEER_PING_METHOD_FREEMEM), 2000);
deployThread(SwitchboardConstants.INDEX_DIST, "DHT Distribution", "selection, transfer and deletion of index entries that are not searched on your peer, but on others", null,
new InstantBusyThread(this, SwitchboardConstants.INDEX_DIST_METHOD_START, SwitchboardConstants.INDEX_DIST_METHOD_JOBCOUNT, SwitchboardConstants.INDEX_DIST_METHOD_FREEMEM), 5000,
Long.parseLong(getConfig(SwitchboardConstants.INDEX_DIST_IDLESLEEP , "5000")),
Long.parseLong(getConfig(SwitchboardConstants.INDEX_DIST_BUSYSLEEP , "0")),
Long.parseLong(getConfig(SwitchboardConstants.INDEX_DIST_MEMPREREQ , "1000000")));
// test routine for snippet fetch
//Set query = new HashSet();
//query.add(CrawlSwitchboardEntry.word2hash("Weitergabe"));
//query.add(CrawlSwitchboardEntry.word2hash("Zahl"));
//plasmaSnippetCache.result scr = snippetCache.retrieve(new URL("http://www.heise.de/mobil/newsticker/meldung/mail/54980"), query, true);
//plasmaSnippetCache.result scr = snippetCache.retrieve(new URL("http://www.heise.de/security/news/foren/go.shtml?read=1&msg_id=7301419&forum_id=72721"), query, true);
//plasmaSnippetCache.result scr = snippetCache.retrieve(new URL("http://www.heise.de/kiosk/archiv/ct/2003/4/20"), query, true, 260);
this.dbImportManager = new ImporterManager();
this.trail = new ArrayList<String>();
log.logConfig("Finished Switchboard Initialization");
}
public int getActiveQueueSize() {
return
this.indexingDocumentProcessor.queueSize() +
this.indexingCondensementProcessor.queueSize() +
this.indexingAnalysisProcessor.queueSize() +
this.indexingStorageProcessor.queueSize();
}
public void overwriteNetworkDefinition() {
// load network configuration into settings
String networkUnitDefinition = getConfig("network.unit.definition", "defaults/yacy.network.freeworld.unit");
final String networkGroupDefinition = getConfig("network.group.definition", "yacy.network.group");
// patch old values
if (networkUnitDefinition.equals("yacy.network.unit")) {
networkUnitDefinition = "defaults/yacy.network.freeworld.unit";
setConfig("network.unit.definition", networkUnitDefinition);
}
// remove old release and bootstrap locations
Iterator<String> ki = configKeys();
ArrayList<String> d = new ArrayList<String>();
String k;
while (ki.hasNext()) {
k = ki.next();
if (k.startsWith("network.unit.update.location")) d.add(k);
if (k.startsWith("network.unit.bootstrap")) d.add(k);
}
for (String s : d) this.removeConfig(s); // must be removed afterwards, otherwise ki.remove() would not remove the property from the file
// include additional network definition properties into our settings
// note that these properties cannot be set in the application because they are
// _always_ overwritten each time with the default values. This is done on purpose:
// the network definition should be made either consistent for all peers,
// or independently using a bootstrap URL
Map<String, String> initProps;
if (networkUnitDefinition.startsWith("http://")) {
try {
setConfig(Switchboard.loadHashMap(new DigestURI(networkUnitDefinition, null)));
} catch (final MalformedURLException e) { }
} else {
final File networkUnitDefinitionFile = (networkUnitDefinition.startsWith("/")) ? new File(networkUnitDefinition) : new File(getRootPath(), networkUnitDefinition);
if (networkUnitDefinitionFile.exists()) {
initProps = FileUtils.loadMap(networkUnitDefinitionFile);
setConfig(initProps);
}
}
if (networkGroupDefinition.startsWith("http://")) {
try {
setConfig(Switchboard.loadHashMap(new DigestURI(networkGroupDefinition, null)));
} catch (final MalformedURLException e) { }
} else {
final File networkGroupDefinitionFile = new File(getRootPath(), networkGroupDefinition);
if (networkGroupDefinitionFile.exists()) {
initProps = FileUtils.loadMap(networkGroupDefinitionFile);
setConfig(initProps);
}
}
// set release locations
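// the loop below reads network.unit.update.location0, ...1, ...2, ... until an empty value is
// found; each location may carry an optional Base64-encoded public key under the ".key" suffix,
// apparently used to verify signed releases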
int i = 0;
CryptoLib cryptoLib;
try {
cryptoLib = new CryptoLib();
while (true) {
String location = getConfig("network.unit.update.location" + i, "");
if (location.length() == 0) break;
DigestURI locationURL;
try {
// try to parse url
locationURL = new DigestURI(location, null);
} catch (final MalformedURLException e) {
break;
}
PublicKey publicKey = null;
// get public key if it's in config
try {
String publicKeyString = getConfig("network.unit.update.location" + i + ".key", null);
if(publicKeyString != null) {
byte[] publicKeyBytes = Base64Order.standardCoder.decode(publicKeyString.trim());
publicKey = cryptoLib.getPublicKeyFromBytes(publicKeyBytes);
}
} catch (InvalidKeySpecException e) {
e.printStackTrace();
}
yacyUpdateLocation updateLocation = new yacyUpdateLocation(locationURL, publicKey);
yacyRelease.latestReleaseLocations.add(updateLocation);
i++;
}
} catch (NoSuchAlgorithmException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
// initiate url license object
licensedURLs = new URLLicense(8);
// set white/blacklists
this.networkWhitelist = Domains.makePatterns(getConfig(SwitchboardConstants.NETWORK_WHITELIST, ""));
this.networkBlacklist = Domains.makePatterns(getConfig(SwitchboardConstants.NETWORK_BLACKLIST, ""));
/*
// in intranet and portal network set robinson mode
if (networkUnitDefinition.equals("defaults/yacy.network.webportal.unit") ||
networkUnitDefinition.equals("defaults/yacy.network.intranet.unit")) {
// switch to robinson mode
setConfig("crawlResponse", "false");
setConfig(plasmaSwitchboardConstants.INDEX_DIST_ALLOW, false);
setConfig(plasmaSwitchboardConstants.INDEX_RECEIVE_ALLOW, false);
}
// in freeworld network set full p2p mode
if (networkUnitDefinition.equals("defaults/yacy.network.freeworld.unit")) {
// switch to p2p mode
setConfig("crawlResponse", "true");
setConfig(plasmaSwitchboardConstants.INDEX_DIST_ALLOW, true);
setConfig(plasmaSwitchboardConstants.INDEX_RECEIVE_ALLOW, true);
}
*/
}
public void switchNetwork(final String networkDefinition) {
log.logInfo("SWITCH NETWORK: switching to '" + networkDefinition + "'");
// pause crawls
final boolean lcp = crawlJobIsPaused(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
if (!lcp) pauseCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
final boolean rcp = crawlJobIsPaused(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
if (!rcp) pauseCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
// trigger online caution
proxyLastAccess = System.currentTimeMillis() + 3000; // at least 3 seconds of online caution to prevent unnecessary database activity in the meantime
log.logInfo("SWITCH NETWORK: SHUT DOWN OF OLD INDEX DATABASE...");
// clean search events which have cached relations to the old index
SearchEventCache.cleanupEvents(true);
// switch the networks
synchronized (this) {
// shut down
synchronized (this.indexSegments) {
this.indexSegments.close();
}
this.crawlStacker.announceClose();
this.crawlStacker.close();
this.webStructure.close();
this.robots.close();
log.logInfo("SWITCH NETWORK: START UP OF NEW INDEX DATABASE...");
// new properties
setConfig("network.unit.definition", networkDefinition);
overwriteNetworkDefinition();
final File indexPrimaryPath = getConfigPath(SwitchboardConstants.INDEX_PRIMARY_PATH, SwitchboardConstants.INDEX_PATH_DEFAULT);
final int wordCacheMaxCount = (int) getConfigLong(SwitchboardConstants.WORDCACHE_MAX_COUNT, 20000);
final long fileSizeMax = (OS.isWindows) ? sb.getConfigLong("filesize.max.win", (long) Integer.MAX_VALUE) : sb.getConfigLong("filesize.max.other", (long) Integer.MAX_VALUE);
final int redundancy = (int) sb.getConfigLong("network.unit.dhtredundancy.senior", 1);
final int partitionExponent = (int) sb.getConfigLong("network.unit.dht.partitionExponent", 0);
final String networkName = getConfig(SwitchboardConstants.NETWORK_NAME, "");
this.networkRoot = new File(new File(indexPrimaryPath, networkName), "NETWORK");
this.queuesRoot = new File(new File(indexPrimaryPath, networkName), "QUEUES");
this.networkRoot.mkdirs();
this.queuesRoot.mkdirs();
// relocate
this.crawlQueues.relocate(this.queuesRoot); // cannot be closed because the busy threads are working with that object
final File mySeedFile = new File(this.networkRoot, yacySeedDB.DBFILE_OWN_SEED);
peers = new yacySeedDB(
this.networkRoot,
"seed.new.heap",
"seed.old.heap",
"seed.pot.heap",
mySeedFile,
redundancy,
partitionExponent,
this.useTailCache,
this.exceed134217727);
indexSegments = new Segments(
log,
new File(new File(indexPrimaryPath, networkName), "SEGMENTS"),
wordCacheMaxCount,
fileSizeMax,
this.useTailCache,
this.exceed134217727);
// startup
crawler = new CrawlSwitchboard(
peers,
networkName,
log,
this.queuesRoot);
// create new web structure
this.webStructure = new WebStructureGraph(log, rankingPath, "LOCAL/010_cr/", getConfig("CRDist0Path", CRDistribution.CR_OWN), new File(queuesRoot, "webStructure.map"));
// load the robots.txt database
this.log.logConfig("Initializing robots.txt DB");
final File robotsDBFile = new File(this.queuesRoot, "crawlRobotsTxt.heap");
this.robots = new RobotsTxt(robotsDBFile);
this.log.logConfig("Loaded robots.txt DB from file " + robotsDBFile.getName() +
", " + robots.size() + " entries" +
", " + ppRamString(robotsDBFile.length()/1024));
// start a loader
log.logConfig("Starting Crawl Loader");
this.crawlQueues = new CrawlQueues(this, this.queuesRoot);
this.crawlQueues.noticeURL.setMinimumDelta(
this.getConfigLong("minimumLocalDelta", this.crawlQueues.noticeURL.getMinimumLocalDelta()),
this.getConfigLong("minimumGlobalDelta", this.crawlQueues.noticeURL.getMinimumGlobalDelta()));
this.crawlStacker = new CrawlStacker(
this.crawlQueues,
this.crawler,
this.indexSegments.segment(Segments.Process.LOCALCRAWLING),
this.peers,
"local.any".indexOf(getConfig("network.unit.domain", "global")) >= 0,
"global.any".indexOf(getConfig("network.unit.domain", "global")) >= 0);
}
// start up crawl jobs
continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
log.logInfo("SWITCH NETWORK: FINISHED START UP, new network is now '" + networkDefinition + "'.");
// check the status of the account configuration: when crawling of local URLs is allowed,
// an automatic authorization of localhost must not be granted, because remote pages could
// otherwise contain links to localhost addresses that steer the YaCy peer (attack scenario)
if ((crawlStacker.acceptLocalURLs()) && (getConfigBool("adminAccountForLocalhost", false))) {
setConfig("adminAccountForLocalhost", false);
if (getConfig(HTTPDemon.ADMIN_ACCOUNT_B64MD5, "").startsWith("0000")) {
// the password was set automatically with a random value.
// We must remove it here so that the user is still able to log in
setConfig(HTTPDemon.ADMIN_ACCOUNT_B64MD5, "");
// after this a message must be generated to alert the user to set a new password
log.logInfo("RANDOM PASSWORD REMOVED! User must set a new password");
}
}
// set the network-specific remote crawl ppm
setRemotecrawlPPM(Math.max(1, (int) getConfigLong("network.unit.remotecrawl.speed", 60)));
}
public void setRemotecrawlPPM(int ppm) {
setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL_BUSYSLEEP, 60000 / ppm);
setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL_IDLESLEEP, Math.max(10000, 180000 / ppm));
setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER_BUSYSLEEP, Math.max(15000, 1800000 / ppm));
setConfig(SwitchboardConstants.CRAWLJOB_REMOTE_CRAWL_LOADER_IDLESLEEP, Math.max(30000, 3600000 / ppm));
}
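// Worked example (illustrative sketch): setRemotecrawlPPM(120) yields
//   remote triggered crawl busysleep = 60000 / 120               =   500 ms
//   remote triggered crawl idlesleep = max(10000,  180000 / 120) = 10000 ms
//   remote crawl loader    busysleep = max(15000, 1800000 / 120) = 15000 ms
//   remote crawl loader    idlesleep = max(30000, 3600000 / 120) = 30000 ms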
public void initMessages() throws IOException {
this.log.logConfig("Starting Message Board");
final File messageDbFile = new File(workPath, "message.heap");
this.messageDB = new messageBoard(messageDbFile);
this.log.logConfig("Loaded Message Board DB from file " + messageDbFile.getName() +
", " + this.messageDB.size() + " entries" +
", " + ppRamString(messageDbFile.length()/1024));
}
public void initWiki() throws IOException {
this.log.logConfig("Starting Wiki Board");
final File wikiDbFile = new File(workPath, "wiki.heap");
this.wikiDB = new wikiBoard(wikiDbFile, new File(workPath, "wiki-bkp.heap"));
this.log.logConfig("Loaded Wiki Board DB from file " + wikiDbFile.getName() +
", " + this.wikiDB.size() + " entries" +
", " + ppRamString(wikiDbFile.length()/1024));
}
public void initBlog() throws IOException {
this.log.logConfig("Starting Blog");
final File blogDbFile = new File(workPath, "blog.heap");
this.blogDB = new blogBoard(blogDbFile);
this.log.logConfig("Loaded Blog DB from file " + blogDbFile.getName() +
", " + this.blogDB.size() + " entries" +
", " + ppRamString(blogDbFile.length()/1024));
final File blogCommentDbFile = new File(workPath, "blogComment.heap");
this.blogCommentDB = new blogBoardComments(blogCommentDbFile);
this.log.logConfig("Loaded Blog-Comment DB from file " + blogCommentDbFile.getName() +
", " + this.blogCommentDB.size() + " entries" +
", " + ppRamString(blogCommentDbFile.length()/1024));
}
public void initBookmarks() throws IOException{
this.log.logConfig("Loading Bookmarks DB");
final File bookmarksFile = new File(workPath, "bookmarks.heap");
final File tagsFile = new File(workPath, "bookmarkTags.heap");
final File datesFile = new File(workPath, "bookmarkDates.heap");
this.bookmarksDB = new bookmarksDB(bookmarksFile, tagsFile, datesFile);
this.log.logConfig("Loaded Bookmarks DB from files "+ bookmarksFile.getName()+ ", "+tagsFile.getName());
this.log.logConfig(this.bookmarksDB.tagsSize()+" Tag, "+this.bookmarksDB.bookmarksSize()+" Bookmarks");
}
public static Switchboard getSwitchboard(){
return sb;
}
public boolean isRobinsonMode() {
// we are in robinson mode if we do not exchange the index by DHT distribution.
// if we run a robinson cluster, we need to take care that search requests and
// remote indexing requests go only to the peers in the same cluster
return !getConfigBool(SwitchboardConstants.INDEX_DIST_ALLOW, false) && !getConfigBool(SwitchboardConstants.INDEX_RECEIVE_ALLOW, false);
}
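// Usage sketch (assuming the Switchboard singleton 'sb'): a peer becomes a robinson peer
// by switching off both DHT transfer directions, e.g.
//   sb.setConfig(SwitchboardConstants.INDEX_DIST_ALLOW, false);
//   sb.setConfig(SwitchboardConstants.INDEX_RECEIVE_ALLOW, false);
//   assert sb.isRobinsonMode();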
public boolean isPublicRobinson() {
// robinson peers may be member of robinson clusters, which can be public or private
// this does not check the robinson attribute, only the specific subtype of the cluster
final String clustermode = getConfig(SwitchboardConstants.CLUSTER_MODE, SwitchboardConstants.CLUSTER_MODE_PUBLIC_PEER);
return (clustermode.equals(SwitchboardConstants.CLUSTER_MODE_PUBLIC_CLUSTER)) || (clustermode.equals(SwitchboardConstants.CLUSTER_MODE_PUBLIC_PEER));
}
public boolean isInMyCluster(final String peer) {
// check if the given peer is in the own network, if this is a robinson cluster
// depending on the robinson cluster type, the peer String may be a peer hash (b64-hash),
// an ip:port String or simply an IP String
// if this robinson mode does not define a cluster membership, false is returned
if (peer == null) return false;
if (!isRobinsonMode()) return false;
final String clustermode = getConfig(SwitchboardConstants.CLUSTER_MODE, SwitchboardConstants.CLUSTER_MODE_PUBLIC_PEER);
if (clustermode.equals(SwitchboardConstants.CLUSTER_MODE_PRIVATE_CLUSTER)) {
// check if we got the request from a peer in the private cluster
final String network = getConfig(SwitchboardConstants.CLUSTER_PEERS_IPPORT, "");
return network.indexOf(peer) >= 0;
} else if (clustermode.equals(SwitchboardConstants.CLUSTER_MODE_PUBLIC_CLUSTER)) {
// check if we got the request from a peer in the public cluster
return this.clusterhashes.containsKey(peer.getBytes());
} else {
return false;
}
}
public boolean isInMyCluster(final yacySeed seed) {
// check if the given peer is in the own network, if this is a robinson cluster
// if this robinson mode does not define a cluster membership, false is returned
if (seed == null) return false;
if (!isRobinsonMode()) return false;
final String clustermode = getConfig(SwitchboardConstants.CLUSTER_MODE, SwitchboardConstants.CLUSTER_MODE_PUBLIC_PEER);
if (clustermode.equals(SwitchboardConstants.CLUSTER_MODE_PRIVATE_CLUSTER)) {
// check if we got the request from a peer in the private cluster
final String network = getConfig(SwitchboardConstants.CLUSTER_PEERS_IPPORT, "");
return network.indexOf(seed.getPublicAddress()) >= 0;
} else if (clustermode.equals(SwitchboardConstants.CLUSTER_MODE_PUBLIC_CLUSTER)) {
// check if we got the request from a peer in the public cluster
return this.clusterhashes.containsKey(seed.hash.getBytes());
} else {
return false;
}
}
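// Usage sketch for a private robinson cluster (hypothetical peer addresses; in this mode
// membership is a substring match against the configured peer list):
//   sb.setConfig(SwitchboardConstants.CLUSTER_MODE, SwitchboardConstants.CLUSTER_MODE_PRIVATE_CLUSTER);
//   sb.setConfig(SwitchboardConstants.CLUSTER_PEERS_IPPORT, "192.168.0.5:8080,192.168.0.6:8080");
//   boolean member = sb.isInMyCluster("192.168.0.5:8080"); // true, provided isRobinsonMode() holds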
public String urlExists(Segments.Process process, final String hash) {
// tests if the hash occurs in any database
// if it exists, the name of the database is returned,
// if it does not exist, null is returned
if (indexSegments.urlMetadata(process).exists(hash)) return "loaded";
return this.crawlQueues.urlExists(hash);
}
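// Usage sketch: the return value names the database where the hash was found, e.g.
//   final String where = sb.urlExists(Segments.Process.LOCALCRAWLING, urlhash);
//   // "loaded" -> already in the url metadata of the segment;
//   // any other non-null value -> still in one of the crawl queues; null -> unknown hash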
public void urlRemove(Segment segment, final String hash) {
segment.urlMetadata().remove(hash);
crawlResults.remove(hash);
crawlQueues.urlRemove(hash);
}
public void urlRemove(Segments.Process process, final String hash) {
indexSegments.urlMetadata(process).remove(hash);
crawlResults.remove(hash);
crawlQueues.urlRemove(hash);
}
public DigestURI getURL(Segments.Process process, final String urlhash) {
if (urlhash == null) return null;
if (urlhash.length() == 0) return null;
final DigestURI ne = crawlQueues.getURL(urlhash);
if (ne != null) return ne;
final URIMetadataRow le = indexSegments.urlMetadata(process).load(urlhash, null, 0);
if (le != null) return le.metadata().url();
return null;
}
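// resolution order: the crawl queues are checked first (the url may not have been loaded yet),
// only then the url metadata of the given segment is loaded from disk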
public RankingProfile getRanking() {
return (getConfig("rankingProfile", "").length() == 0) ?
new RankingProfile(QueryParams.CONTENTDOM_TEXT) :
new RankingProfile("", crypt.simpleDecode(sb.getConfig("rankingProfile", ""), null));
}
/**
* checks whether the proxy, the local search or the remote search has been accessed recently.
* If no limit is exceeded, null is returned. If a limit is exceeded,
* the name of the service that caused the caution is returned
* @return the name of the service that requires online caution, or null if no limit is exceeded
*/
public String onlineCaution() {
if (System.currentTimeMillis() - this.proxyLastAccess < Integer.parseInt(getConfig(SwitchboardConstants.PROXY_ONLINE_CAUTION_DELAY, "30000"))) return "proxy";
if (System.currentTimeMillis() - this.localSearchLastAccess < Integer.parseInt(getConfig(SwitchboardConstants.LOCALSEACH_ONLINE_CAUTION_DELAY, "30000"))) return "localsearch";
if (System.currentTimeMillis() - this.remoteSearchLastAccess < Integer.parseInt(getConfig(SwitchboardConstants.REMOTESEARCH_ONLINE_CAUTION_DELAY, "30000"))) return "remotesearch";
return null;
}
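// Usage sketch, as practiced by the background jobs below:
//   final String cautionCause = sb.onlineCaution();
//   if (cautionCause != null) {
//       log.logFine("online caution for " + cautionCause + ", omitting job");
//       return false;
//   }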
private static String ppRamString(long kBytes) {
// the argument is a KByte count: callers pass file lengths already divided by 1024
if (kBytes < 1024) return kBytes + " KByte";
kBytes = kBytes / 1024;
if (kBytes < 1024) return kBytes + " MByte";
kBytes = kBytes / 1024;
if (kBytes < 1024) return kBytes + " GByte";
return (kBytes / 1024) + " TByte";
}
/**
* {@link CrawlProfile Crawl Profiles} are saved independently from the queues themselves
* and therefore have to be cleaned up from time to time. This method only performs the clean-up
* if - and only if - the {@link IndexingStack switchboard},
* {@link LoaderDispatcher loader} and {@link plasmaCrawlNURL local crawl} queues are all empty.
* <p>
* Then it iterates through all existing {@link CrawlProfile crawl profiles} and removes
* all profiles which are not hardcoded.
* </p>
* <p>
* <i>If this method encounters DB-failures, the profile DB will be reset and</i>
* <code>true</code><i> will be returned</i>
* </p>
* @see #CRAWL_PROFILE_PROXY hardcoded
* @see #CRAWL_PROFILE_REMOTE hardcoded
* @see #CRAWL_PROFILE_SNIPPET_TEXT hardcoded
* @see #CRAWL_PROFILE_SNIPPET_MEDIA hardcoded
* @return whether this method has done something, i.e. <code>false</code> when the queues
* are still filled or there are no profiles left to clean up
* @throws <b>InterruptedException</b> if the current thread has been interrupted, e.g. by the
* shutdown procedure
*/
public boolean cleanProfiles() throws InterruptedException {
if ((getActiveQueueSize() > 0) || (crawlQueues.size() > 0) ||
(crawlStacker != null && crawlStacker.size() > 0) ||
(crawlQueues.noticeURL.notEmpty()))
return false;
return this.crawler.cleanProfiles();
}
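// Usage note: this is typically called by the periodic cleanup job; it returns false while
// any parsing or crawl queue still holds entries, so profile cleanup never interferes with
// an active crawl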
public void close() {
log.logConfig("SWITCHBOARD SHUTDOWN STEP 1: sending termination signal to managed threads:");
MemoryTracker.stopSystemProfiling();
terminateAllThreads(true);
log.logConfig("SWITCHBOARD SHUTDOWN STEP 2: sending termination signal to threaded indexing");
// closing all still running db importer jobs
indexingDocumentProcessor.announceShutdown();
indexingDocumentProcessor.awaitShutdown(12000);
crawlStacker.announceClose();
indexingCondensementProcessor.announceShutdown();
indexingAnalysisProcessor.announceShutdown();
indexingStorageProcessor.announceShutdown();
dhtDispatcher.close();
indexingCondensementProcessor.awaitShutdown(12000);
indexingAnalysisProcessor.awaitShutdown(12000);
indexingStorageProcessor.awaitShutdown(12000);
crawlStacker.close();
this.dbImportManager.close();
Client.closeAllConnections();
wikiDB.close();
blogDB.close();
blogCommentDB.close();
userDB.close();
bookmarksDB.close();
messageDB.close();
robots.close();
webStructure.flushCitationReference("crg");
webStructure.close();
crawlQueues.close();
crawler.close();
log.logConfig("SWITCHBOARD SHUTDOWN STEP 3: sending termination signal to database manager (stand by...)");
indexSegments.close();
peers.close();
Cache.close();
UPnP.deletePortMapping();
Tray.removeTray();
log.logConfig("SWITCHBOARD SHUTDOWN TERMINATED");
}
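// note on the shutdown order above: producers (managed threads, workflow processors, crawl
// stacker, DHT dispatcher) are terminated first, then the application-level DBs, and only
// at the end the index segments, seed DB and cache, so no thread writes to a closed database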
/**
* pass a response to the indexer
* @param response
* @return null if successful, an error message otherwise
*/
public String toIndexer(final Response response) {
assert response != null;
// get next queue entry and start a queue processing
if (response == null) {
if (this.log.isFine()) log.logFine("deQueue: queue entry is null");
return "queue entry is null";
}
if (response.profile() == null) {
if (this.log.isFine()) log.logFine("deQueue: profile is null");
return "profile is null";
}
// check if the document should be indexed based on proxy/crawler rules
String noIndexReason = "unspecified indexing error";
if (response.processCase(peers.mySeed().hash) == EventOrigin.PROXY_LOAD) {
// proxy-load
noIndexReason = response.shallIndexCacheForProxy();
} else {
// normal crawling
noIndexReason = response.shallIndexCacheForCrawler();
}
// check if the parser supports the mime type
if (noIndexReason == null) {
noIndexReason = TextParser.supports(response.url(), response.getMimeType());
}
// check X-YACY-Index-Control
// With the X-YACY-Index-Control header set to "no-index" a client can disallow
// YaCy to index the response that is returned as answer to a request
if (noIndexReason == null && response.requestProhibitsIndexing()) {
noIndexReason = "X-YACY-Index-Control header prohibits indexing";
}
// check accepted domain / localhost accesses
if (noIndexReason == null) {
noIndexReason = crawlStacker.urlInAcceptedDomain(response.url());
}
// if noIndexReason is set, indexing is not allowed
if (noIndexReason != null) {
// log cause and close queue
final DigestURI referrerURL = response.referrerURL();
if (log.isFine()) log.logFine("deQueue: not indexed any word in URL " + response.url() + "; cause: " + noIndexReason);
addURLtoErrorDB(response.url(), (referrerURL == null) ? "" : referrerURL.hash(), response.initiator(), response.name(), noIndexReason);
// finish this entry
return "not indexed any word in URL " + response.url() + "; cause: " + noIndexReason;
}
// put document into the concurrent processing queue
if (log.isFinest()) log.logFinest("deQueue: passing to indexing queue: " + response.url().toNormalform(true, false));
try {
this.indexingDocumentProcessor.enQueue(new indexingQueueEntry(Segments.Process.LOCALCRAWLING, response, null, null));
return null;
} catch (InterruptedException e) {
e.printStackTrace();
return "interrupted: " + e.getMessage();
}
}
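// Usage sketch: a null return only means that the response was accepted into the asynchronous
// indexing pipeline, not that indexing has already succeeded, e.g.
//   final String error = sb.toIndexer(response);
//   if (error != null) log.logWarning("indexing rejected: " + error);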
public boolean processSurrogate(String s) {
File surrogateFile = new File(this.surrogatesInPath, s);
File outfile = new File(this.surrogatesOutPath, s);
if (!surrogateFile.exists() || !surrogateFile.canWrite() || !surrogateFile.canRead()) return false;
if (outfile.exists()) return false;
boolean moved = false;
try {
SurrogateReader reader = new SurrogateReader(new BufferedInputStream(new FileInputStream(surrogateFile)), 3);
Thread readerThread = new Thread(reader, "Surrogate-Reader " + surrogateFile.getAbsolutePath());
readerThread.start();
DCEntry surrogate;
Response response;
while ((surrogate = reader.take()) != DCEntry.poison) {
// check if url is in accepted domain
assert surrogate != null;
assert crawlStacker != null;
final String urlRejectReason = crawlStacker.urlInAcceptedDomain(surrogate.getIdentifier());
if (urlRejectReason != null) {
if (this.log.isFine()) this.log.logFine("Rejected URL '" + surrogate.getIdentifier() + "': " + urlRejectReason);
continue;
}
// create a queue entry
Document document = surrogate.document();
Request request = new Request(
peers.mySeed().hash,
surrogate.getIdentifier(),
null,
"",
new Date(),
new Date(),
this.crawler.defaultSurrogateProfile.handle(),
0,
0,
0
);
response = new Response(request, null, null, "200", this.crawler.defaultSurrogateProfile);
indexingQueueEntry queueEntry = new indexingQueueEntry(Segments.Process.LOCALCRAWLING, response, document, null);
// place the queue entry into the concurrent process of the condenser (document analysis)
try {
indexingCondensementProcessor.enQueue(queueEntry);
} catch (InterruptedException e) {
e.printStackTrace();
break;
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
moved = surrogateFile.renameTo(outfile);
}
return moved;
}
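// Usage sketch (hypothetical file name): work off one surrogate dump from the "in" path;
// the finally-block above moves the file to the "out" path whether parsing succeeded or not:
//   final boolean moved = sb.processSurrogate("export-dump.xml");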
public int surrogateQueueSize() {
// count surrogates
String[] surrogatelist = this.surrogatesInPath.list();
if (surrogatelist == null) return 0; // the surrogate "in" path may not exist yet
if (surrogatelist.length > 100) return 100;
int count = 0;
for (String s: surrogatelist) {
if (s.endsWith(".xml")) count++;
if (count >= 100) break;
}
return count;
}
public void surrogateFreeMem() {
// do nothing
}
public boolean surrogateProcess() {
// work off fresh entries from the proxy or from the crawler
String cautionCause = onlineCaution();
if (cautionCause != null) {
if (this.log.isFine())
log.logFine("deQueue: online caution for " + cautionCause
+ ", omitting resource stack processing");
return false;
}
try {
// check surrogates
String[] surrogatelist = this.surrogatesInPath.list();
if (surrogatelist != null && surrogatelist.length > 0) {
// look if there is any XML inside
for (String surrogate: surrogatelist) {
// check for interruption
checkInterruption();
if (surrogate.endsWith(".xml")) {
// read the surrogate file and store entry in index
if (processSurrogate(surrogate)) return true;
}
}
}
} catch (InterruptedException e) {
return false;
}
return false;
}
public static class indexingQueueEntry extends WorkflowJob {
public Segments.Process process;
public Response queueEntry;
public Document document;
public Condenser condenser;
public indexingQueueEntry(
final Segments.Process process,
final Response queueEntry,
final Document document,
final Condenser condenser) {
super();
this.process = process;
this.queueEntry = queueEntry;
this.document = document;
this.condenser = condenser;
}
}
public int cleanupJobSize() {
int c = 0;
if ((crawlQueues.delegatedURL.stackSize() > 1000)) c++;
if ((crawlQueues.errorURL.stackSize() > 1000)) c++;
for (EventOrigin origin: EventOrigin.values()) {
if (crawlResults.getStackSize(origin) > 1000) c++;
}
return c;
}
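// the returned value is a coarse busy indicator: one point per over-filled stack
// (more than 1000 entries), signalling how much work cleanupJob() below has to do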
public boolean cleanupJob() {
try {
boolean hasDoneSomething = false;
// clear caches if necessary
if (!MemoryControl.request(8000000L, false)) {
for (Segment indexSegment: this.indexSegments) indexSegment.urlMetadata().clearCache();
SearchEventCache.cleanupEvents(true);
}
// set a random password if no password is configured
if (!crawlStacker.acceptLocalURLs() && getConfigBool("adminAccountForLocalhost", false) && getConfig(HTTPDemon.ADMIN_ACCOUNT_B64MD5, "").length() == 0) {
// make a 'random' password
setConfig(HTTPDemon.ADMIN_ACCOUNT_B64MD5, "0000" + Digest.encodeMD5Hex(System.getProperties().toString() + System.currentTimeMillis()));
setConfig("adminAccount", "");
}
// refresh recrawl dates
try{
Iterator<CrawlProfile.entry> it = crawler.profilesActiveCrawls.profiles(true);
entry selentry;
while (it.hasNext()) {
selentry = it.next();
if (selentry.name().equals(CrawlSwitchboard.CRAWL_PROFILE_PROXY))
crawler.profilesActiveCrawls.changeEntry(selentry, CrawlProfile.entry.RECRAWL_IF_OLDER,
Long.toString(crawler.profilesActiveCrawls.getRecrawlDate(CrawlSwitchboard.CRAWL_PROFILE_PROXY_RECRAWL_CYCLE)));
// if (selentry.name().equals(CrawlSwitchboard.CRAWL_PROFILE_REMOTE));
if (selentry.name().equals(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_LOCAL_TEXT))
crawler.profilesActiveCrawls.changeEntry(selentry, CrawlProfile.entry.RECRAWL_IF_OLDER,
Long.toString(crawler.profilesActiveCrawls.getRecrawlDate(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_LOCAL_TEXT_RECRAWL_CYCLE)));
if (selentry.name().equals(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_GLOBAL_TEXT))
crawler.profilesActiveCrawls.changeEntry(selentry, CrawlProfile.entry.RECRAWL_IF_OLDER,
Long.toString(crawler.profilesActiveCrawls.getRecrawlDate(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_GLOBAL_TEXT_RECRAWL_CYCLE)));
if (selentry.name().equals(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_LOCAL_MEDIA))
crawler.profilesActiveCrawls.changeEntry(selentry, CrawlProfile.entry.RECRAWL_IF_OLDER,
Long.toString(crawler.profilesActiveCrawls.getRecrawlDate(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_LOCAL_MEDIA_RECRAWL_CYCLE)));
if (selentry.name().equals(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_GLOBAL_MEDIA))
crawler.profilesActiveCrawls.changeEntry(selentry, CrawlProfile.entry.RECRAWL_IF_OLDER,
Long.toString(crawler.profilesActiveCrawls.getRecrawlDate(CrawlSwitchboard.CRAWL_PROFILE_SNIPPET_GLOBAL_MEDIA_RECRAWL_CYCLE)));
if (selentry.name().equals(CrawlSwitchboard.CRAWL_PROFILE_SURROGATE))
crawler.profilesActiveCrawls.changeEntry(selentry, CrawlProfile.entry.RECRAWL_IF_OLDER,
Long.toString(crawler.profilesActiveCrawls.getRecrawlDate(CrawlSwitchboard.CRAWL_PROFILE_SURROGATE_RECRAWL_CYCLE)));
}
} catch (final IOException e) {}
// close unused connections
Client.cleanup();
// do transmission of CR-files
checkInterruption();
int count = rankingOwnDistribution.size() / 100;
if (count == 0) count = 1;
if (count > 5) count = 5;
if (rankingOn && !isRobinsonMode()) {
rankingOwnDistribution.transferRanking(count);
rankingOtherDistribution.transferRanking(1);
}
// clean up delegated stack
checkInterruption();
if ((crawlQueues.delegatedURL.stackSize() > 1000)) {
if (this.log.isFine()) log.logFine("Cleaning Delegated-URLs report stack, " + crawlQueues.delegatedURL.stackSize() + " entries on stack");
crawlQueues.delegatedURL.clearStack();
hasDoneSomething = true;
}
// clean up error stack
checkInterruption();
if ((crawlQueues.errorURL.stackSize() > 1000)) {
if (this.log.isFine()) log.logFine("Cleaning Error-URLs report stack, " + crawlQueues.errorURL.stackSize() + " entries on stack");
crawlQueues.errorURL.clearStack();
hasDoneSomething = true;
}
// clean up loadedURL stack
for (EventOrigin origin: EventOrigin.values()) {
checkInterruption();
if (crawlResults.getStackSize(origin) > 1000) {
if (this.log.isFine()) log.logFine("Cleaning Loaded-URLs report stack, " + crawlResults.getStackSize(origin) + " entries on stack " + origin.getCode());
crawlResults.clearStack(origin);
hasDoneSomething = true;
}
}
// clean up image stack
ResultImages.clearQueues();
// clean up profiles
checkInterruption();
if (cleanProfiles()) hasDoneSomething = true;
// clean up news
checkInterruption();
try {
if (this.log.isFine()) log.logFine("Cleaning Incoming News, " + this.peers.newsPool.size(yacyNewsPool.INCOMING_DB) + " entries on stack");
if (this.peers.newsPool.automaticProcess(peers) > 0) hasDoneSomething = true;
} catch (final IOException e) {}
if (getConfigBool("cleanup.deletionProcessedNews", true)) {
this.peers.newsPool.clear(yacyNewsPool.PROCESSED_DB);
}
if (getConfigBool("cleanup.deletionPublishedNews", true)) {
this.peers.newsPool.clear(yacyNewsPool.PUBLISHED_DB);
}
// clean up seed-dbs
if(getConfigBool("routing.deleteOldSeeds.permission",true)) {
final long deleteOldSeedsTime = getConfigLong("routing.deleteOldSeeds.time",7)*24*3600000;
Iterator<yacySeed> e = this.peers.seedsSortedDisconnected(true,yacySeed.LASTSEEN);
yacySeed seed = null;
final ArrayList<String> deleteQueue = new ArrayList<String>();
checkInterruption();
//clean passive seeds
while(e.hasNext()) {
seed = e.next();
if(seed != null) {
//list is sorted -> break when peers are too young to delete
if(seed.getLastSeenUTC() > (System.currentTimeMillis()-deleteOldSeedsTime))
break;
deleteQueue.add(seed.hash);
}
}
for(int i=0;i<deleteQueue.size();++i) this.peers.removeDisconnected(deleteQueue.get(i));
deleteQueue.clear();
e = this.peers.seedsSortedPotential(true,yacySeed.LASTSEEN);
checkInterruption();
//clean potential seeds
while(e.hasNext()) {
seed = e.next();
if(seed != null) {
//list is sorted -> break when peers are too young to delete
if(seed.getLastSeenUTC() > (System.currentTimeMillis()-deleteOldSeedsTime))
break;
deleteQueue.add(seed.hash);
}
}
for (int i = 0; i < deleteQueue.size(); ++i) this.peers.removePotential(deleteQueue.get(i));
}
// check if update is available and
// if auto-update is activated perform an automatic installation and restart
final yacyRelease updateVersion = yacyRelease.rulebasedUpdateInfo(false);
if (updateVersion != null) {
// there is a version that is more recent. Load it and re-start with it
log.logInfo("AUTO-UPDATE: downloading more recent release " + updateVersion.getUrl());
final File downloaded = updateVersion.downloadRelease();
final boolean devenvironment = new File(this.getRootPath(), ".svn").exists();
if (devenvironment) {
log.logInfo("AUTO-UPDATE: omiting update because this is a development environment");
} else if ((downloaded == null) || (!downloaded.exists()) || (downloaded.length() == 0)) {
log.logInfo("AUTO-UPDATE: omiting update because download failed (file cannot be found, is too small or signature is bad)");
} else {
yacyRelease.deployRelease(downloaded);
terminate(5000);
log.logInfo("AUTO-UPDATE: deploy and restart initiated");
}
}
// initiate broadcast about peer startup to spread supporter url
if (this.peers.newsPool.size(yacyNewsPool.OUTGOING_DB) == 0) {
// read profile
final Properties profile = new Properties();
FileInputStream fileIn = null;
try {
fileIn = new FileInputStream(new File("DATA/SETTINGS/profile.txt"));
profile.load(fileIn);
} catch(final IOException e) {
} finally {
if (fileIn != null) try { fileIn.close(); } catch (final Exception e) {}
}
final String homepage = (String) profile.get("homepage");
if ((homepage != null) && (homepage.length() > 10)) {
final Properties news = new Properties();
news.put("homepage", profile.get("homepage"));
this.peers.newsPool.publishMyNews(yacyNewsRecord.newRecord(peers.mySeed(), yacyNewsPool.CATEGORY_PROFILE_BROADCAST, news));
}
}
// update the cluster set
this.clusterhashes = this.peers.clusterHashes(getConfig("cluster.peers.yacydomain", ""));
// after all clean up is done, check the resource usage
observer.resourceObserverJob();
return hasDoneSomething;
} catch (final InterruptedException e) {
this.log.logInfo("cleanupJob: Shutdown detected");
return false;
}
}
/**
 * Pause the crawling process of the given job type.
 * @param jobType the name of the crawl job to pause
 */
public void pauseCrawlJob(final String jobType) {
final Object[] status = this.crawlJobsStatus.get(jobType);
synchronized(status[SwitchboardConstants.CRAWLJOB_SYNC]) {
status[SwitchboardConstants.CRAWLJOB_STATUS] = Boolean.TRUE;
}
setConfig(jobType + "_isPaused", "true");
}
/**
 * Resume the previously paused crawling process of the given job type.
 * @param jobType the name of the crawl job to continue
 */
public void continueCrawlJob(final String jobType) {
final Object[] status = this.crawlJobsStatus.get(jobType);
synchronized(status[SwitchboardConstants.CRAWLJOB_SYNC]) {
if (((Boolean)status[SwitchboardConstants.CRAWLJOB_STATUS]).booleanValue()) {
status[SwitchboardConstants.CRAWLJOB_STATUS] = Boolean.FALSE;
status[SwitchboardConstants.CRAWLJOB_SYNC].notifyAll();
}
}
setConfig(jobType + "_isPaused", "false");
}
/**
 * @param jobType the name of the crawl job to check
 * @return <code>true</code> if the crawl job is currently paused, <code>false</code> otherwise
 */
public boolean crawlJobIsPaused(final String jobType) {
final Object[] status = this.crawlJobsStatus.get(jobType);
synchronized(status[SwitchboardConstants.CRAWLJOB_SYNC]) {
return ((Boolean)status[SwitchboardConstants.CRAWLJOB_STATUS]).booleanValue();
}
}
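/*
 * The following methods form the concurrent indexing pipeline that replaced the
 * old indexing queue: parseDocument -> condenseDocument -> webStructureAnalysis
 * -> storeDocumentIndex. Each stage updates the queue state of the Response object.
 */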
public indexingQueueEntry parseDocument(final indexingQueueEntry in) {
in.queueEntry.updateStatus(Response.QUEUE_STATE_PARSING);
// debug
if (log.isFinest()) log.logFinest("PARSE "+ in.queueEntry.toString());
Document document = null;
try {
document = parseDocument(in.queueEntry);
} catch (final InterruptedException e) {
document = null;
} catch (final Exception e) {
document = null;
}
if (document == null) {
return null;
}
return new indexingQueueEntry(in.process, in.queueEntry, document, null);
}
private Document parseDocument(Response response) throws InterruptedException {
Document document = null;
final EventOrigin processCase = response.processCase(peers.mySeed().hash);
if (this.log.isFine()) log.logFine("processResourceStack processCase=" + processCase +
", depth=" + response.depth() +
", maxDepth=" + ((response.profile() == null) ? "null" : Integer.toString(response.profile().depth())) +
", must-match=" + ((response.profile() == null) ? "null" : response.profile().mustMatchPattern().toString()) +
", must-not-match=" + ((response.profile() == null) ? "null" : response.profile().mustNotMatchPattern().toString()) +
", initiatorHash=" + response.initiator() +
//", responseHeader=" + ((entry.responseHeader() == null) ? "null" : entry.responseHeader().toString()) +
", url=" + response.url()); // DEBUG
// PARSE CONTENT
final long parsingStartTime = System.currentTimeMillis();
byte[] b = null;
try {
// fetch the document
b = Cache.getContent(response.url());
if (b == null) {
this.log.logWarning("the resource '" + response.url() + "' is missing in the cache.");
addURLtoErrorDB(response.url(), response.referrerHash(), response.initiator(), response.name(), "missing");
return null;
}
} catch (IOException e) {
this.log.logWarning("Unable fetch the resource '" + response.url() + "'. from the cache: " + e.getMessage());
addURLtoErrorDB(response.url(), response.referrerHash(), response.initiator(), response.name(), e.getMessage());
return null;
}
try {
// parse the document
document = TextParser.parseSource(response.url(), response.getMimeType(), response.getCharacterEncoding(), b);
assert(document != null) : "Unexpected error. Parser returned null.";
} catch (final ParserException e) {
this.log.logWarning("Unable to parse the resource '" + response.url() + "'. " + e.getMessage(), e);
addURLtoErrorDB(response.url(), response.referrerHash(), response.initiator(), response.name(), e.getMessage());
if (document != null) {
document.close();
document = null;
}
return null;
}
final long parsingEndTime = System.currentTimeMillis();
// get the document date
final Date docDate = response.lastModified();
// put anchors on crawl stack
final long stackStartTime = System.currentTimeMillis();
if (
((processCase == EventOrigin.PROXY_LOAD) || (processCase == EventOrigin.LOCAL_CRAWLING)) &&
((response.profile() == null) || (response.depth() < response.profile().depth()))
) {
// get the hyperlinks
final Map<DigestURI, String> hl = document.getHyperlinks();
// add all images also to the crawl stack
for (ImageEntry imageReference : document.getImages().values()) {
hl.put(imageReference.url(), imageReference.alt());
}
// insert those hyperlinks to the crawler
DigestURI nextUrl;
for (Map.Entry<DigestURI, String> nextEntry : hl.entrySet()) {
// check for interruption
checkInterruption();
// process the next hyperlink
nextUrl = nextEntry.getKey();
String u = nextUrl.toNormalform(true, true);
if (!(u.startsWith("http") || u.startsWith("ftp"))) continue;
// enqueue the hyperlink into the pre-notice-url db
crawlStacker.enqueueEntry(new Request(
response.initiator(),
nextUrl,
response.url().hash(),
nextEntry.getValue(),
null,
docDate,
response.profile().handle(),
response.depth() + 1,
0,
0
));
}
final long stackEndTime = System.currentTimeMillis();
if (log.isInfo()) log.logInfo("CRAWL: ADDED " + hl.size() + " LINKS FROM " + response.url().toNormalform(false, true) +
", STACKING TIME = " + (stackEndTime-stackStartTime) +
", PARSING TIME = " + (parsingEndTime-parsingStartTime));
}
return document;
}
public indexingQueueEntry condenseDocument(final indexingQueueEntry in) {
in.queueEntry.updateStatus(Response.QUEUE_STATE_CONDENSING);
// debug
if (log.isFinest()) log.logFinest("CONDENSE "+ in.queueEntry.toString());
// strip out words and generate statistics
if (this.log.isFine()) log.logFine("Condensing for '" + in.queueEntry.url().toNormalform(false, true) + "'");
try {
Condenser condenser = new Condenser(in.document, in.queueEntry.profile().indexText(), in.queueEntry.profile().indexMedia());
// update image result list statistics
// it's good to do this concurrently here, because it needs a DNS lookup
// to compute a URL hash which is necessary for a double-check
final CrawlProfile.entry profile = in.queueEntry.profile();
ResultImages.registerImages(in.document, (profile == null) ? true : !profile.remoteIndexing());
return new indexingQueueEntry(in.process, in.queueEntry, in.document, condenser);
} catch (final UnsupportedEncodingException e) {
return null;
}
}
public indexingQueueEntry webStructureAnalysis(final indexingQueueEntry in) {
in.queueEntry.updateStatus(Response.QUEUE_STATE_STRUCTUREANALYSIS);
final Integer[] ioLinks = webStructure.generateCitationReference(in.document, in.condenser, in.queueEntry.lastModified()); // [outlinksSame, outlinksOther]
in.document.setInboundLinks(ioLinks[0].intValue());
in.document.setOutboundLinks(ioLinks[1].intValue());
return in;
}
public void storeDocumentIndex(final indexingQueueEntry in) {
in.queueEntry.updateStatus(Response.QUEUE_STATE_INDEXSTORAGE);
storeDocumentIndex(in.process, in.queueEntry, in.document, in.condenser);
in.queueEntry.updateStatus(Response.QUEUE_STATE_FINISHED);
}
private void storeDocumentIndex(Segments.Process process, final Response queueEntry, final Document document, final Condenser condenser) {
// CREATE INDEX
final String dc_title = document.dc_title();
final DigestURI referrerURL = queueEntry.referrerURL();
final EventOrigin processCase = queueEntry.processCase(peers.mySeed().hash);
// remove stopwords
log.logInfo("Excluded " + condenser.excludeWords(stopwords) + " words in URL " + queueEntry.url());
// STORE URL TO LOADED-URL-DB
URIMetadataRow newEntry = null;
try {
newEntry = indexSegments.segment(process).storeDocument(
queueEntry.url(),
referrerURL,
queueEntry.lastModified(),
queueEntry.size(),
document,
condenser);
RSSFeed.channels((queueEntry.initiator().equals(peers.mySeed().hash)) ? RSSFeed.LOCALINDEXING : RSSFeed.REMOTEINDEXING).addMessage(new RSSMessage("Indexed web page", dc_title, queueEntry.url().toNormalform(true, false)));
} catch (final IOException e) {
if (this.log.isFine()) log.logFine("Not Indexed Resource '" + queueEntry.url().toNormalform(false, true) + "': process case=" + processCase);
addURLtoErrorDB(queueEntry.url(), referrerURL.hash(), queueEntry.initiator(), dc_title, "error storing url: " + e.getMessage());
return;
}
// update url result list statistics
crawlResults.stack(
newEntry, // loaded url db entry
queueEntry.initiator(), // initiator peer hash
this.peers.mySeed().hash, // executor peer hash
processCase // process case
);
// STORE WORD INDEX
if ((!queueEntry.profile().indexText()) && (!queueEntry.profile().indexMedia())) {
if (this.log.isFine()) log.logFine("Not Indexed Resource '" + queueEntry.url().toNormalform(false, true) + "': process case=" + processCase);
addURLtoErrorDB(queueEntry.url(), referrerURL.hash(), queueEntry.initiator(), dc_title, "indexing not wanted by crawl profile, process case=" + processCase);
return;
}
// increment number of indexed urls
indexedPages++;
// update profiling info
if (System.currentTimeMillis() - lastPPMUpdate > 20000) {
// we don't want to do this too often
updateMySeed();
MemoryTracker.update("ppm", Long.valueOf(currentPPM()), true);
lastPPMUpdate = System.currentTimeMillis();
}
MemoryTracker.update("indexed", queueEntry.url().toNormalform(true, false), false);
// if this was performed for a remote crawl request, notify requester
if ((processCase == EventOrigin.GLOBAL_CRAWLING) && (queueEntry.initiator() != null)) {
final yacySeed initiatorPeer = peers.get(queueEntry.initiator());
if (initiatorPeer != null) {
log.logInfo("Sending crawl receipt for '" + queueEntry.url().toNormalform(false, true) + "' to " + initiatorPeer.getName());
if (clusterhashes != null) initiatorPeer.setAlternativeAddress(clusterhashes.get(queueEntry.initiator().getBytes()));
// start a thread for receipt sending to avoid a blocking here
new Thread(new receiptSending(initiatorPeer, newEntry), "sending receipt to " + queueEntry.initiator()).start();
}
}
}
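/**
 * Runnable that sends a crawl receipt to the initiator peer; executed in its own
 * thread so that receipt delivery does not block the indexing pipeline.
 */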
public class receiptSending implements Runnable {
yacySeed initiatorPeer;
URIMetadataRow reference;
public receiptSending(final yacySeed initiatorPeer, final URIMetadataRow reference) {
this.initiatorPeer = initiatorPeer;
this.reference = reference;
}
public void run() {
yacyClient.crawlReceipt(peers.mySeed(), initiatorPeer, "crawl", "fill", "indexed", reference, "");
}
}
private static SimpleDateFormat DateFormat1 = new SimpleDateFormat("EEE, dd MMM yyyy");
public static String dateString(final Date date) {
if (date == null) return "";
return DateFormat1.format(date);
}
// we need locale independent RFC-822 dates at some places
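// note: SimpleDateFormat is not thread-safe; concurrent use may throw, which is
// presumably why dateString822() below formats defensively inside a try/catch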
private static SimpleDateFormat DateFormatter822 = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss Z", Locale.US);
public static String dateString822(final Date date) {
if (date == null) return "";
try {
return DateFormatter822.format(date);
} catch (Exception e) {
e.printStackTrace();
return DateFormatter822.format(new Date());
}
}
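/**
 * Determine the admin authentication level of a request.
 * @param requestHeader the request to inspect
 * @return 0 = wrong password, 1 = no password given, 2 = no password stored,
 *         3 = soft-authenticated (localhost), 4 = hard-authenticated (userDB)
 */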
public int adminAuthenticated(final RequestHeader requestHeader) {
// authorization for localhost, only if flag is set to grant localhost access as admin
final String clientIP = requestHeader.get(HeaderFramework.CONNECTION_PROP_CLIENTIP, "");
final String refererHost = requestHeader.refererHost();
final boolean accessFromLocalhost = serverCore.isLocalhost(clientIP) && (refererHost.length() == 0 || serverCore.isLocalhost(refererHost));
if (getConfigBool("adminAccountForLocalhost", false) && accessFromLocalhost) return 3; // soft-authenticated for localhost
// get the authorization string from the header
final String authorization = (requestHeader.get(RequestHeader.AUTHORIZATION, "xxxxxx")).trim().substring(6);
// security check against too long authorization strings
if (authorization.length() > 256) return 0;
// authorization by encoded password, only for localhost access
final String adminAccountBase64MD5 = getConfig(HTTPDemon.ADMIN_ACCOUNT_B64MD5, "");
if (accessFromLocalhost && (adminAccountBase64MD5.equals(authorization))) return 3; // soft-authenticated for localhost
// authorization by hit in userDB
if (userDB.hasAdminRight(requestHeader.get(RequestHeader.AUTHORIZATION, "xxxxxx"), requestHeader.getHeaderCookies())) return 4; //return, because 4=max
// authorization with admin keyword in configuration
return HTTPDemon.staticAdminAuthenticated(authorization, this);
}
public boolean verifyAuthentication(final RequestHeader header, final boolean strict) {
// handle access rights
switch (adminAuthenticated(header)) {
case 0: // wrong password given
//try { Thread.sleep(3000); } catch (final InterruptedException e) { } // prevent brute-force
return false;
case 1: // no password given
return false;
case 2: // no password stored
return !strict;
case 3: // soft-authenticated for localhost only
return true;
case 4: // hard-authenticated, all ok
return true;
}
return false;
}
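/**
 * Adjust the busy-sleep times of the crawl and index-distribution threads so that
 * the local crawler approximates the wanted throughput in pages per minute (PPM);
 * e.g. wantedPPM = 600 yields a busy-sleep of 60000 / 600 = 100 ms per crawl loop.
 */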
public void setPerformance(int wantedPPM) {
// we consider 3 cases here
// wantedPPM <= 10: low performance
// 10 < wantedPPM < 1000: custom performance
// 1000 <= wantedPPM : maximum performance
if (wantedPPM <= 10) wantedPPM = 10;
if (wantedPPM >= 6000) wantedPPM = 6000;
final int newBusySleep = 60000 / wantedPPM; // for wantedPPM = 10: 6000; for wantedPPM = 1000: 60
BusyThread thread;
thread = getThread(SwitchboardConstants.INDEX_DIST);
if (thread != null) {
setConfig(SwitchboardConstants.INDEX_DIST_BUSYSLEEP, thread.setBusySleep(Math.max(2000, newBusySleep * 2)));
thread.setIdleSleep(30000);
}
thread = getThread(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
if (thread != null) {
setConfig(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL_BUSYSLEEP , thread.setBusySleep(newBusySleep));
thread.setIdleSleep(2000);
}
}
public static int accessFrequency(final HashMap<String, TreeSet<Long>> tracker, final String host) {
// returns the access frequency in queries per hour for a given host and a specific tracker
final long timeInterval = 1000 * 60 * 60;
final TreeSet<Long> accessSet = tracker.get(host);
if (accessSet == null) return 0;
return accessSet.tailSet(Long.valueOf(System.currentTimeMillis() - timeInterval)).size();
}
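/**
 * Check whether this peer is currently allowed to transfer index parts to the DHT.
 * @param segment the index segment to check
 * @return null if the transfer may proceed, otherwise a description of the reason
 *         why the transfer is rejected
 */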
public String dhtShallTransfer(String segment) {
String cautionCause = onlineCaution();
if (cautionCause != null) {
return "online caution for " + cautionCause + ", dht transmission";
}
if (this.peers == null) {
return "no DHT distribution: seedDB == null";
}
if (this.peers.mySeed() == null) {
return "no DHT distribution: mySeed == null";
}
if (this.peers.mySeed().isVirgin()) {
return "no DHT distribution: status is virgin";
}
if (this.peers.noDHTActivity()) {
return "no DHT distribution: network too small";
}
if (!this.getConfigBool("network.unit.dht", true)) {
return "no DHT distribution: disabled by network.unit.dht";
}
if (getConfig(SwitchboardConstants.INDEX_DIST_ALLOW, "false").equalsIgnoreCase("false")) {
return "no DHT distribution: not enabled (per setting)";
}
Segment indexSegment = this.indexSegments.segment(segment);
if (indexSegment.urlMetadata().size() < 10) {
return "no DHT distribution: loadedURL.size() = " + indexSegment.urlMetadata().size();
}
if (indexSegment.termIndex().sizesMax() < 100) {
return "no DHT distribution: not enough words - wordIndex.size() = " + indexSegment.termIndex().sizesMax();
}
if ((getConfig(SwitchboardConstants.INDEX_DIST_ALLOW_WHILE_CRAWLING, "false").equalsIgnoreCase("false")) && (crawlQueues.noticeURL.notEmptyLocal())) {
return "no DHT distribution: crawl in progress: noticeURL.stackSize() = " + crawlQueues.noticeURL.size() + ", sbQueue.size() = " + getActiveQueueSize();
}
if ((getConfig(SwitchboardConstants.INDEX_DIST_ALLOW_WHILE_INDEXING, "false").equalsIgnoreCase("false")) && (getActiveQueueSize() > 1)) {
return "no DHT distribution: indexing in progress: noticeURL.stackSize() = " + crawlQueues.noticeURL.size() + ", sbQueue.size() = " + getActiveQueueSize();
}
return null; // this means; yes, please do dht transfer
}
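/**
 * Run one round of DHT index distribution: select reference containers between a
 * start and a limit hash, enqueue them into the transmission cloud and dequeue one
 * container for actual transmission to a target peer.
 * @return true if any container was enqueued or dequeued
 */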
public boolean dhtTransferJob() {
return dhtTransferJob(getConfig(SwitchboardConstants.SEGMENT_DHTOUT, "default"));
}
public boolean dhtTransferJob(String segment) {
final String rejectReason = dhtShallTransfer(segment);
if (rejectReason != null) {
if (this.log.isFine()) log.logFine(rejectReason);
return false;
}
boolean hasDoneSomething = false;
if (this.dhtDispatcher.cloudSize() > this.peers.scheme.verticalPartitions() * 4) {
log.logInfo("dhtTransferJob: no selection, too many entries in transmission cloud: " + this.dhtDispatcher.cloudSize());
} else if (MemoryControl.available() < 1024*1024*25) {
log.logInfo("dhtTransferJob: no selection, too less memory available : " + (MemoryControl.available() / 1024 / 1024) + " MB");
} else {
byte[] startHash = null, limitHash = null;
int tries = 10;
while (tries-- > 0) {
startHash = PeerSelection.selectTransferStart();
assert startHash != null;
limitHash = PeerSelection.limitOver(this.peers, startHash);
if (limitHash != null) break;
}
if (limitHash == null || startHash == null) {
log.logInfo("dhtTransferJob: approaching full DHT dispersion.");
return false;
}
log.logInfo("dhtTransferJob: selected " + new String(startHash) + " as start hash");
log.logInfo("dhtTransferJob: selected " + new String(limitHash) + " as limit hash");
try {
boolean enqueued = this.dhtDispatcher.selectContainersEnqueueToCloud(
startHash,
limitHash,
dhtMaxContainerCount,
dhtMaxReferenceCount,
5000);
hasDoneSomething = hasDoneSomething | enqueued;
log.logInfo("dhtTransferJob: result from enqueueing: " + ((enqueued) ? "true" : "false"));
} catch (IOException e) {
log.logSevere("dhtTransferJob: interrupted with exception: " + e.getMessage(), e);
return false;
}
}
if (this.dhtDispatcher.transmissionSize() >= 10) {
log.logInfo("dhtTransferJob: no dequeueing from cloud to transmission: too many concurrent sessions: " + this.dhtDispatcher.transmissionSize());
} else {
boolean dequeued = this.dhtDispatcher.dequeueContainer();
hasDoneSomething = hasDoneSomething | dequeued;
log.logInfo("dhtTransferJob: result from dequeueing: " + ((dequeued) ? "true" : "false"));
}
return hasDoneSomething;
}
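/**
 * Record a URL that could not be processed in the error-URL stack, together with
 * the hash of its initiator and the reason for the failure.
 */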
private void addURLtoErrorDB(
final DigestURI url,
final String referrerHash,
final String initiator,
final String name,
final String failreason
) {
assert initiator != null;
// create a new errorURL DB entry
final Request bentry = new Request(
initiator,
url,
referrerHash,
(name == null) ? "" : name,
new Date(),
null,
null,
0,
0,
0);
crawlQueues.errorURL.push(bentry, initiator, new Date(), 0, failreason);
}
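/**
 * Current indexing speed in pages per minute: counts the 'indexed' events of the
 * last 20 seconds and extrapolates to a full minute (hence the factor 3).
 */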
public int currentPPM() {
return MemoryTracker.countEvents("indexed", 20000) * 3;
}
public String makeDefaultPeerName() {
String name = myPublicIP() + "-" + yacyCore.speedKey + "dpn" + OS.infoKey() + (System.currentTimeMillis() & 99);
name = name.replace('.', '-');
name = name.replace('_', '-');
name = name.replace(':', '-');
return name;
}
public void updateMySeed() {
if (getConfig("peerName", "anomic").equals("anomic")) {
// generate new peer name
setConfig("peerName", makeDefaultPeerName());
}
peers.mySeed().put(yacySeed.NAME, getConfig("peerName", "nameless"));
peers.mySeed().put(yacySeed.PORT, Integer.toString(serverCore.getPortNr(getConfig("port", "8080"))));
//the speed of indexing (pages/minute) of the peer
final long uptime = (System.currentTimeMillis() - serverCore.startupTime) / 1000;
peers.mySeed().put(yacySeed.ISPEED, Integer.toString(currentPPM()));
totalQPM = requestedQueries * 60d / Math.max(uptime, 1d);
peers.mySeed().put(yacySeed.RSPEED, Double.toString(totalQPM /*Math.max((float) requestcdiff, 0f) * 60f / Math.max((float) uptimediff, 1f)*/ ));
peers.mySeed().put(yacySeed.UPTIME, Long.toString(uptime/60)); // the uptime of the peer in minutes
peers.mySeed().put(yacySeed.LCOUNT, Integer.toString(indexSegments.URLCount())); // the number of links that the peer has stored (LURL's)
peers.mySeed().put(yacySeed.NCOUNT, Integer.toString(crawlQueues.noticeURL.size())); // the number of links that the peer has noticed, but not loaded (NURL's)
peers.mySeed().put(yacySeed.RCOUNT, Integer.toString(crawlQueues.noticeURL.stackSize(NoticedURL.STACK_TYPE_LIMIT))); // the number of links that the peer provides for remote crawling (ZURL's)
peers.mySeed().put(yacySeed.ICOUNT, Integer.toString(indexSegments.RWICount())); // the minimum number of words that the peer has indexed (as it says)
peers.mySeed().put(yacySeed.SCOUNT, Integer.toString(peers.sizeConnected())); // the number of seeds that the peer has stored
peers.mySeed().put(yacySeed.CCOUNT, Double.toString(((int) ((peers.sizeConnected() + peers.sizeDisconnected() + peers.sizePotential()) * 60.0 / (uptime + 1.01)) * 100) / 100.0)); // the number of clients that the peer connects (as connects/hour)
peers.mySeed().put(yacySeed.VERSION, yacyBuildProperties.getLongVersion());
peers.mySeed().setFlagDirectConnect(true);
peers.mySeed().setLastSeenUTC();
peers.mySeed().put(yacySeed.UTC, DateFormatter.UTCDiffString());
peers.mySeed().setFlagAcceptRemoteCrawl(getConfig("crawlResponse", "").equals("true"));
peers.mySeed().setFlagAcceptRemoteIndex(getConfig("allowReceiveIndex", "").equals("true"));
//mySeed.setFlagAcceptRemoteIndex(true);
}
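/**
 * Bootstrap the seed database: download every configured seed-list URL
 * (network.unit.bootstrap.seedlist*) and try to connect each peer found there.
 */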
public void loadSeedLists() {
// uses the superseed to initialize the database with known seeds
yacySeed ys;
String seedListFileURL;
DigestURI url;
Iterator<String> enu;
int lc;
final int sc = peers.sizeConnected();
ResponseHeader header;
yacyCore.log.logInfo("BOOTSTRAP: " + sc + " seeds known from previous run");
// - use the superseed to further fill up the seedDB
int ssc = 0, c = 0;
while (true) {
if (Thread.currentThread().isInterrupted()) break;
seedListFileURL = sb.getConfig("network.unit.bootstrap.seedlist" + c, "");
if (seedListFileURL.length() == 0) break;
c++;
if (
seedListFileURL.startsWith("http://") ||
seedListFileURL.startsWith("https://")
) {
// load the seed list
try {
final RequestHeader reqHeader = new RequestHeader();
reqHeader.put(HeaderFramework.PRAGMA, "no-cache");
reqHeader.put(HeaderFramework.CACHE_CONTROL, "no-cache");
reqHeader.put(HeaderFramework.USER_AGENT, HTTPLoader.yacyUserAgent);
url = new DigestURI(seedListFileURL, null);
final long start = System.currentTimeMillis();
header = Client.whead(url.toString(), reqHeader);
final long loadtime = System.currentTimeMillis() - start;
if (header == null) {
if (loadtime > getConfigLong("bootstrapLoadTimeout", 6000)) {
yacyCore.log.logWarning("BOOTSTRAP: seed-list URL " + seedListFileURL + " not available, time-out after " + loadtime + " milliseconds");
} else {
yacyCore.log.logWarning("BOOTSTRAP: seed-list URL " + seedListFileURL + " not available, no content");
}
} else if (header.lastModified() == null) {
yacyCore.log.logWarning("BOOTSTRAP: seed-list URL " + seedListFileURL + " not usable, last-modified is missing");
} else if ((header.age() > 86400000) && (ssc > 0)) {
yacyCore.log.logInfo("BOOTSTRAP: seed-list URL " + seedListFileURL + " too old (" + (header.age() / 86400000) + " days)");
} else {
ssc++;
final byte[] content = Client.wget(url.toString(), reqHeader, (int) getConfigLong("bootstrapLoadTimeout", 20000));
enu = FileUtils.strings(content);
lc = 0;
while (enu.hasNext()) {
ys = yacySeed.genRemoteSeed(enu.next(), null, false);
if ((ys != null) &&
((!peers.mySeedIsDefined()) || !peers.mySeed().hash.equals(ys.hash))) {
if (peers.peerActions.connectPeer(ys, false)) lc++;
//seedDB.writeMap(ys.hash, ys.getMap(), "init");
//System.out.println("BOOTSTRAP: received peer " + ys.get(yacySeed.NAME, "anonymous") + "/" + ys.getAddress());
//lc++;
}
}
yacyCore.log.logInfo("BOOTSTRAP: " + lc + " seeds from seed-list URL " + seedListFileURL + ", AGE=" + (header.age() / 3600000) + "h");
}
} catch (final IOException e) {
// this is when wget fails, commonly because of timeout
yacyCore.log.logWarning("BOOTSTRAP: failed (1) to load seeds from seed-list URL " + seedListFileURL + ": " + e.getMessage());
} catch (final Exception e) {
// this is when wget fails; may be because of missing internet connection
yacyCore.log.logSevere("BOOTSTRAP: failed (2) to load seeds from seed-list URL " + seedListFileURL + ": " + e.getMessage(), e);
}
}
}
yacyCore.log.logInfo("BOOTSTRAP: " + (peers.sizeConnected() - sc) + " new seeds while bootstraping.");
}
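/**
 * Cooperative shutdown check: throws an InterruptedException if this
 * switchboard is terminating or the calling thread was interrupted.
 */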
public void checkInterruption() throws InterruptedException {
final Thread curThread = Thread.currentThread();
if ((curThread instanceof WorkflowThread) && ((WorkflowThread)curThread).shutdownInProgress()) throw new InterruptedException("Shutdown in progress ...");
else if (this.terminate || curThread.isInterrupted()) throw new InterruptedException("Shutdown in progress ...");
}
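/**
 * Schedules a shutdown of this switchboard.
 * @param delay time to wait before termination, in milliseconds; must be greater than 0
 */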
public void terminate(final long delay) {
if (delay <= 0) throw new IllegalArgumentException("The shutdown delay must be greater than 0.");
(new delayedShutdown(this,delay)).start();
}
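/**
 * Terminates this switchboard immediately and wakes up any thread
 * blocked in waitForShutdown().
 */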
public void terminate() {
this.terminate = true;
this.shutdownSync.release();
}
public boolean isTerminated() {
return this.terminate;
}
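/**
 * Blocks until a shutdown has been triggered.
 * @return the termination flag, true when terminate() has been called
 */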
public boolean waitForShutdown() throws InterruptedException {
this.shutdownSync.acquire();
return this.terminate;
}
/**
 * Loads the content of the given URL and parses it as a property table.
 *
 * Strings like abc=123 are parsed as pair: abc => 123
 *
 * @param url the URL to load
 * @return the parsed key/value pairs; an empty map if loading or parsing fails
 */
public static Map<String, String> loadHashMap(final DigestURI url) {
try {
// sending request
final RequestHeader reqHeader = new RequestHeader();
reqHeader.put(HeaderFramework.USER_AGENT, HTTPLoader.yacyUserAgent);
final HashMap<String, String> result = FileUtils.table(Client.wget(url.toString(), reqHeader, 10000));
if (result == null) return new HashMap<String, String>();
return result;
} catch (final Exception e) {
return new HashMap<String, String>();
}
}
}
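/**
 * Helper thread that sleeps for a given delay and then terminates the
 * switchboard; used by Switchboard.terminate(delay).
 */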
class delayedShutdown extends Thread {
private final Switchboard sb;
private final long delay;
public delayedShutdown(final Switchboard sb, final long delay) {
this.sb = sb;
this.delay = delay;
}
public void run() {
try {
Thread.sleep(delay);
} catch (final InterruptedException e) {
sb.getLog().logInfo("interrupted delayed shutdown");
} catch (final Exception e) {
e.printStackTrace();
}
this.sb.terminate();
}
}