mirror of
https://github.com/yacy/yacy_search_server.git
synced 2024-09-19 00:01:41 +02:00
a2fa75e688
*) Various checks moved to a separate thread
Checks such as the blacklist check and the robots.txt disallow check are now done by a separate thread to unburden the indexer thread(s).
TODO: we may have to introduce a thread pool here if this single thread turns out to be a bottleneck because of the time-consuming robots.txt downloads.

*) Improved index transfer
The index selection and transmission are now done in parallel to improve index transfer performance.
TODO: we could possibly speed this up further by using multiple transmission threads in parallel instead of only a single one.

*) Gzip-encoded POST requests
It is now configurable whether a gzip-encoded POST request should be sent on index transfer/distribution.

*) Storage peer (very experimental and not optimized yet)
It is now possible to send the result of the yacy indexer thread to a remote peer instead of storing the indexed words locally. This can be done by setting the property "storagePeerHash" in the yacy config file.
- Please note that if the index transfer fails, the index is stored locally instead.
- TODO: currently this index transfer is done by the indexer thread. To speed up the indexer, a) this transmission should be done in parallel and b) multiple chunks should be bundled and transferred together.

*) General performance improvements
- better memory cleanup after http request processing has finished
- replaced some string concatenations with StringBuffers
- replaced BufferedInputStreams with serverByteBuffer
- replaced Vectors with ArrayLists wherever possible
- replaced Hashtables with HashMaps wherever possible
This was done because calls to Vector or Hashtable methods take about three times longer than calls to ArrayList or HashMap methods.
TODO: we should take a look at the class serverObject, which inherits from HashMap: do we really need synchronization for this class?
TODO: replace ArrayLists with LinkedLists where random access to the list elements is not needed.

*) Robots parser now supports if-modified-since downloads
If the cached robots.txt file is older than 7 days, the robots parser tries to download robots.txt with the If-Modified-Since header to avoid an unnecessary download if the file has not changed. Additionally, the ETag header is used to detect changes. (A minimal sketch of this follows the message below.)

*) Crawler: better handling of unsupported mime types and file extensions

*) Bugfix: plasmaWordIndexEntity was not closed correctly in
- query.java
- plasmaSwitchboard.java

*) Function minimizeUrlDB added to yacy.java
This function tests the current urlHashDB for unused urls.
ATTENTION: please do not use this function at the moment, because it causes the wordIndexDB to flush all words into the word directory!

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@853 6c8d7289-2bf4-0310-a012-ef5d649a1542
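The robots parser change described above amounts to a conditional HTTP GET. The following is a minimal sketch of the idea using only java.net; the class and method names are illustrative, not YaCy's actual httpc API, and only the 7-day threshold and the If-Modified-Since/ETag behavior are taken from the commit message:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class ConditionalRobotsFetch {
    private static final long SEVEN_DAYS = 7L * 24 * 60 * 60 * 1000;

    // Returns the new robots.txt body, or null if the cached copy is still valid.
    static byte[] fetchIfChanged(URL robotsURL, long cachedLastModified, String cachedETag) throws IOException {
        // cached copy younger than seven days: do not even ask the server
        if (System.currentTimeMillis() - cachedLastModified < SEVEN_DAYS) return null;

        HttpURLConnection con = (HttpURLConnection) robotsURL.openConnection();
        con.setIfModifiedSince(cachedLastModified);           // sends the If-Modified-Since header
        if (cachedETag != null) con.setRequestProperty("If-None-Match", cachedETag); // sends the cached ETag
        try {
            if (con.getResponseCode() == HttpURLConnection.HTTP_NOT_MODIFIED) {
                return null;                                  // 304: file unchanged, keep the cached copy
            }
            InputStream in = con.getInputStream();
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            byte[] buffer = new byte[4096];
            int n;
            while ((n = in.read(buffer)) != -1) out.write(buffer, 0, n);
            in.close();
            return out.toByteArray();                         // changed: caller re-parses and re-caches
        } finally {
            con.disconnect();
        }
    }
}

A caller would persist the response's Date and ETag headers alongside the downloaded file so the next check can reuse them.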
481 lines
27 KiB
Java
// IndexCreate_p.java
// -----------------------
// part of the AnomicHTTPD caching proxy
// (C) by Michael Peter Christen; mc@anomic.de
// first published on http://www.anomic.de
// Frankfurt, Germany, 2004
// last major change: 02.12.2004
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// Using this software in any meaning (reading, learning, copying, compiling,
// running) means that you agree that the Author(s) is (are) not responsible
// for cost, loss of data or any harm that may be caused directly or indirectly
// by usage of this software or this documentation. The usage of this software
// is at your own risk. The installation and usage (starting/running) of this
// software may allow other people or applications to access your computer and
// any attached devices and is highly dependent on the configuration of the
// software, which must be done by the user of the software; the author(s) is
// (are) also not responsible for proper configuration and usage of the
// software, even if provoked by documentation provided together with
// the software.
//
// Any changes to this file according to the GPL as documented in the file
// gpl.txt aside this file in the shipment you received can be done to the
// lines that follow this copyright notice here, but changes must not be
// done inside the copyright notice above. A re-distribution must contain
// the intact and unchanged copyright notice.
// Contributions and changes to the program code must be marked as such.

// You must compile this file with
// javac -classpath .:../classes IndexCreate_p.java
// if the shell's current path is HTROOT

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;

import de.anomic.data.wikiCode;
import de.anomic.htmlFilter.htmlFilterContentScraper;
import de.anomic.htmlFilter.htmlFilterOutputStream;
import de.anomic.http.httpHeader;
import de.anomic.plasma.plasmaCrawlNURL;
import de.anomic.plasma.plasmaCrawlProfile;
import de.anomic.plasma.plasmaParser;
import de.anomic.plasma.plasmaSwitchboard;
import de.anomic.plasma.plasmaURL;
import de.anomic.server.serverFileUtils;
import de.anomic.server.serverObjects;
import de.anomic.server.serverSwitch;
import de.anomic.server.serverThread;
import de.anomic.tools.bitfield;
import de.anomic.yacy.yacyCore;
import de.anomic.yacy.yacySeed;
import de.anomic.yacy.yacyNewsRecord;
import de.anomic.yacy.yacyNewsPool;

public class IndexCreate_p {

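    // note: SimpleDateFormat is not thread-safe; concurrent calls sharing this static formatter can garble dates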
    private static SimpleDateFormat dayFormatter = new SimpleDateFormat("yyyy/MM/dd", Locale.US);
    private static String daydate(Date date) {
        if (date == null) return ""; else return dayFormatter.format(date);
    }

    public static serverObjects respond(httpHeader header, serverObjects post, serverSwitch env) {
        // return variable that accumulates replacements
        plasmaSwitchboard switchboard = (plasmaSwitchboard) env;
        wikiCode wikiTransformer = new wikiCode(switchboard);
        serverObjects prop = new serverObjects();

        prop.put("error", 0);
        prop.put("info", 0);
        prop.put("refreshbutton", 0);

        switchboard.cleanProfiles();

        int i;

        if (post != null) {
            if (post.containsKey("crawlingstart")) {
                // init crawl
                if (yacyCore.seedDB == null) {
                    prop.put("error", 3);
                } else {
                    // set new properties
                    String newcrawlingfilter = post.get("crawlingFilter", ".*");
                    env.setConfig("crawlingFilter", newcrawlingfilter);
                    int newcrawlingdepth = Integer.parseInt((String) post.get("crawlingDepth", "0"));
                    env.setConfig("crawlingDepth", Integer.toString(newcrawlingdepth));
                    boolean crawlingQ = ((String) post.get("crawlingQ", "")).equals("on");
                    env.setConfig("crawlingQ", (crawlingQ) ? "true" : "false");
                    boolean storeHTCache = ((String) post.get("storeHTCache", "")).equals("on");
                    env.setConfig("storeHTCache", (storeHTCache) ? "true" : "false");
                    boolean localIndexing = ((String) post.get("localIndexing", "")).equals("on");
                    env.setConfig("localIndexing", (localIndexing) ? "true" : "false");
                    boolean crawlOrder = ((String) post.get("crawlOrder", "")).equals("on");
                    env.setConfig("crawlOrder", (crawlOrder) ? "true" : "false");
                    boolean xsstopw = ((String) post.get("xsstopw", "")).equals("on");
                    env.setConfig("xsstopw", (xsstopw) ? "true" : "false");
                    boolean xdstopw = ((String) post.get("xdstopw", "")).equals("on");
                    env.setConfig("xdstopw", (xdstopw) ? "true" : "false");
                    boolean xpstopw = ((String) post.get("xpstopw", "")).equals("on");
                    env.setConfig("xpstopw", (xpstopw) ? "true" : "false");

                    String crawlingMode = post.get("crawlingMode", "url");
                    if (crawlingMode.equals("url")) {
                        // getting the crawl job start url
                        String crawlingStart = post.get("crawlingURL", "");
                        crawlingStart = crawlingStart.trim();

                        // adding the prefix http:// if necessary
                        if (!(crawlingStart.startsWith("http"))) crawlingStart = "http://" + crawlingStart;

                        // normalizing the URL
                        crawlingStart = plasmaParser.urlNormalform(crawlingStart);

                        // check if the url is proper
                        URL crawlingStartURL = null;
                        try {
                            crawlingStartURL = new URL(crawlingStart);
                        } catch (MalformedURLException e) {
                            crawlingStartURL = null;
                        }

                        // check if the pattern matches
                        if ((crawlingStartURL == null) /* || (!(crawlingStart.matches(newcrawlingfilter))) */) {
                            // print error message
                            prop.put("error", 4); // crawl filter does not match url
                            prop.put("error_newcrawlingfilter", newcrawlingfilter);
                            prop.put("error_crawlingStart", crawlingStart);
                        } else try {
                            // stack request
                            // first delete the old entry, if it exists
                            String urlhash = plasmaURL.urlHash(crawlingStart);
                            switchboard.urlPool.loadedURL.remove(urlhash);
                            switchboard.urlPool.noticeURL.remove(urlhash);
                            switchboard.urlPool.errorURL.remove(urlhash);

                            // stack the url
                            plasmaCrawlProfile.entry pe = switchboard.profiles.newEntry(crawlingStartURL.getHost(), crawlingStart, newcrawlingfilter, newcrawlingfilter, newcrawlingdepth, newcrawlingdepth, crawlingQ, storeHTCache, true, localIndexing, crawlOrder, xsstopw, xdstopw, xpstopw);
                            String reasonString = switchboard.sbStackCrawlThread.stackCrawl(crawlingStart, null, yacyCore.seedDB.mySeed.hash, "CRAWLING-ROOT", new Date(), 0, pe);

                            if (reasonString == null) {
                                // liftoff!
                                prop.put("info", 2); // start msg
                                prop.put("info_crawlingURL", ((String) post.get("crawlingURL")));

                                // generate a YaCyNews record if the global flag was set
                                if (crawlOrder) {
                                    Map m = new HashMap(pe.map()); // must be cloned
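                                    // strip the attributes that only matter to the local peer before broadcasting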
                                    m.remove("specificDepth");
                                    m.remove("localIndexing");
                                    m.remove("remoteIndexing");
                                    m.remove("xsstopw");
                                    m.remove("xpstopw");
                                    m.remove("xdstopw");
                                    m.remove("storeTXCache");
                                    m.remove("storeHTCache");
                                    m.remove("generalFilter");
                                    m.remove("specificFilter");
                                    m.put("intention", ((String) post.get("intention", "")).replace(',', '/'));
                                    yacyCore.newsPool.publishMyNews(new yacyNewsRecord("crwlstrt", m));
                                }

                            } else {
                                prop.put("error", 5); // crawling failed
                                prop.put("error_crawlingURL", wikiTransformer.replaceHTML(((String) post.get("crawlingURL"))));
                                prop.put("error_reasonString", reasonString);

                                switchboard.urlPool.errorURL.newEntry(crawlingStartURL, null, yacyCore.seedDB.mySeed.hash, yacyCore.seedDB.mySeed.hash,
                                        crawlingStartURL.getHost(), reasonString, new bitfield(plasmaURL.urlFlagLength), false);
                            }
                        } catch (Exception e) {
                            // something went wrong
                            prop.put("error", 6); // error with url
                            prop.put("error_crawlingStart", crawlingStart);
                            prop.put("error_error", e.getMessage());
                            e.printStackTrace();
                        }

                    } else if (crawlingMode.equals("file")) {
                        if (post.containsKey("crawlingFile")) {
                            // getting the name of the uploaded file
                            String fileName = (String) post.get("crawlingFile");
                            try {
                                File file = new File(fileName);

                                // getting the content of the bookmark file
                                byte[] fileContent = (byte[]) post.get("crawlingFile$file");

                                // parsing the bookmark file and fetching the headline and contained links
                                htmlFilterContentScraper scraper = new htmlFilterContentScraper(file.toURL());
                                OutputStream os = new htmlFilterOutputStream(null, scraper, null, false);
                                serverFileUtils.write(fileContent, os);
                                os.close();

                                String headline = scraper.getHeadline();
                                HashMap hyperlinks = (HashMap) scraper.getAnchors();

                                // creating a crawler profile
                                plasmaCrawlProfile.entry profile = switchboard.profiles.newEntry(fileName, file.toURL().toString(), newcrawlingfilter, newcrawlingfilter, newcrawlingdepth, newcrawlingdepth, crawlingQ, storeHTCache, true, localIndexing, crawlOrder, xsstopw, xdstopw, xpstopw);

                                // loop through the contained links
                                Iterator iterator = hyperlinks.entrySet().iterator();
                                int c = 0;
                                while (iterator.hasNext()) {
                                    Map.Entry e = (Map.Entry) iterator.next();
                                    String nexturlstring = (String) e.getKey();

                                    if (nexturlstring == null) continue;

                                    nexturlstring = nexturlstring.trim();

                                    // normalizing the URL
                                    nexturlstring = plasmaParser.urlNormalform(nexturlstring);

                                    // generating an url object
                                    URL nexturlURL = null;
                                    try {
                                        nexturlURL = new URL(nexturlstring);
                                    } catch (MalformedURLException ex) {
                                        nexturlURL = null;
                                        c++;
                                        continue;
                                    }

                                    // enqueuing the url for crawling
                                    String rejectReason = switchboard.sbStackCrawlThread.stackCrawl(nexturlstring, null, yacyCore.seedDB.mySeed.hash, (String) e.getValue(), new Date(), 1, profile);

                                    // if stacking failed, add the url to the errorURL list
                                    if (rejectReason == null) {
                                        c++;
                                    } else {
                                        switchboard.urlPool.errorURL.newEntry(nexturlURL, null, yacyCore.seedDB.mySeed.hash, yacyCore.seedDB.mySeed.hash,
                                                (String) e.getValue(), rejectReason, new bitfield(plasmaURL.urlFlagLength), false);
                                    }
                                }

                            } catch (Exception e) {
                                // something went wrong
                                prop.put("error", 7); // error with file
                                prop.put("error_crawlingStart", fileName);
                                prop.put("error_error", e.getMessage());
                                e.printStackTrace();
                            }
                        }
                    }
                }
            }

            if (post.containsKey("distributedcrawling")) {
                long newBusySleep = Integer.parseInt(env.getConfig("62_remotetriggeredcrawl_busysleep", "100"));
                if (((String) post.get("dcr", "")).equals("acceptCrawlMax")) {
                    env.setConfig("crawlResponse", "true");
                    newBusySleep = 100;
                } else if (((String) post.get("dcr", "")).equals("acceptCrawlLimited")) {
                    env.setConfig("crawlResponse", "true");
                    int newppm = Integer.parseInt(post.get("acceptCrawlLimit", "1"));
                    if (newppm < 1) newppm = 1;
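                    // 60000 ms per minute divided by the requested pages-per-minute gives the per-page sleep,
                    // e.g. 60 ppm -> 1000 ms; the 100 ms floor below caps the effective rate at 600 ppm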
                    newBusySleep = 60000 / newppm;
                    if (newBusySleep < 100) newBusySleep = 100;
                } else if (((String) post.get("dcr", "")).equals("acceptCrawlDenied")) {
                    env.setConfig("crawlResponse", "false");
                }
                serverThread rct = switchboard.getThread("62_remotetriggeredcrawl");
                rct.setBusySleep(newBusySleep);
                env.setConfig("62_remotetriggeredcrawl_busysleep", Long.toString(newBusySleep));
                //boolean crawlResponse = ((String) post.get("acceptCrawlMax", "")).equals("on");
                //env.setConfig("crawlResponse", (crawlResponse) ? "true" : "false");
            }

            if (post.containsKey("pausecrawlqueue")) {
                switchboard.pauseCrawling();
                prop.put("info", 4); // crawling paused
            }

            if (post.containsKey("continuecrawlqueue")) {
                switchboard.continueCrawling();
                prop.put("info", 5); // crawling continued
            }
        }

        // define visible variables
        prop.put("proxyPrefetchDepth", env.getConfig("proxyPrefetchDepth", "0"));
        prop.put("crawlingDepth", env.getConfig("crawlingDepth", "0"));
        prop.put("crawlingFilter", env.getConfig("crawlingFilter", "0"));
        prop.put("crawlingQChecked", env.getConfig("crawlingQ", "").equals("true") ? 1 : 0);
        prop.put("storeHTCacheChecked", env.getConfig("storeHTCache", "").equals("true") ? 1 : 0);
        prop.put("localIndexingChecked", env.getConfig("localIndexing", "").equals("true") ? 1 : 0);
        prop.put("crawlOrderChecked", env.getConfig("crawlOrder", "").equals("true") ? 1 : 0);
        long busySleep = Integer.parseInt(env.getConfig("62_remotetriggeredcrawl_busysleep", "100"));
        if (busySleep < 100) {
            busySleep = 100;
            env.setConfig("62_remotetriggeredcrawl_busysleep", Long.toString(busySleep));
        }
        if (env.getConfig("crawlResponse", "").equals("true")) {
            if (busySleep <= 100) {
                prop.put("acceptCrawlMaxChecked", 1);
                prop.put("acceptCrawlLimitedChecked", 0);
                prop.put("acceptCrawlDeniedChecked", 0);
            } else {
                prop.put("acceptCrawlMaxChecked", 0);
                prop.put("acceptCrawlLimitedChecked", 1);
                prop.put("acceptCrawlDeniedChecked", 0);
            }
        } else {
            prop.put("acceptCrawlMaxChecked", 0);
            prop.put("acceptCrawlLimitedChecked", 0);
            prop.put("acceptCrawlDeniedChecked", 1);
        }
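        // invert the sleep interval back into pages per minute for display (60000 / busySleep); anything faster is shown as 60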
        int ppm = (int) ((long) 60000 / busySleep);
        if (ppm > 60) ppm = 60;
        prop.put("PPM", ppm);
        prop.put("xsstopwChecked", env.getConfig("xsstopw", "").equals("true") ? 1 : 0);
        prop.put("xdstopwChecked", env.getConfig("xdstopw", "").equals("true") ? 1 : 0);
        prop.put("xpstopwChecked", env.getConfig("xpstopw", "").equals("true") ? 1 : 0);

        int queueStackSize = switchboard.sbQueue.size();
        int loaderThreadsSize = switchboard.cacheLoader.size();
        int crawlerListSize = switchboard.urlPool.noticeURL.stackSize();
        int completequeue = queueStackSize + loaderThreadsSize + crawlerListSize;
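        // offer the refresh button while any stage of the crawl pipeline (indexer queue, loader threads, crawler stack) still holds entries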
        if ((completequeue > 0) || ((post != null) && (post.containsKey("refreshpage")))) {
            prop.put("refreshbutton", 1);
        }

        // create prefetch table
        boolean dark;

        // list the crawl profiles
        int count = 0;
        //try {
        Iterator it = switchboard.profiles.profiles(true);
        plasmaCrawlProfile.entry profile;
        dark = true;
        while (it.hasNext()) {
            profile = (plasmaCrawlProfile.entry) it.next();
            //table += profile.map().toString() + "<br>";
            prop.put("crawlProfiles_" + count + "_dark", ((dark) ? 1 : 0));
            prop.put("crawlProfiles_" + count + "_name", wikiTransformer.replaceHTML(profile.name()));
            prop.put("crawlProfiles_" + count + "_startURL", wikiTransformer.replaceHTML(profile.startURL()));
            prop.put("crawlProfiles_" + count + "_handle", wikiTransformer.replaceHTML(profile.handle()));
            prop.put("crawlProfiles_" + count + "_depth", profile.generalDepth());
            prop.put("crawlProfiles_" + count + "_filter", profile.generalFilter());
            prop.put("crawlProfiles_" + count + "_withQuery", ((profile.crawlingQ()) ? 1 : 0));
            prop.put("crawlProfiles_" + count + "_storeCache", ((profile.storeHTCache()) ? 1 : 0));
            prop.put("crawlProfiles_" + count + "_localIndexing", ((profile.localIndexing()) ? 1 : 0));
            prop.put("crawlProfiles_" + count + "_remoteIndexing", ((profile.remoteIndexing()) ? 1 : 0));

            dark = !dark;
            count++;
        }
        //} catch (IOException e) {}
        prop.put("crawlProfiles", count);

        // create the table of other peers' crawls using YaCyNews
        int availableNews = yacyCore.newsPool.size(yacyNewsPool.INCOMING_DB);
        int showedCrawl = 0;
        yacyNewsRecord record;
        yacySeed peer;
        String peername;
        try {
            for (int c = 0; c < availableNews; c++) {
                record = yacyCore.newsPool.get(yacyNewsPool.INCOMING_DB, c);
                if (record == null) continue;
                if (record.category().equals("crwlstrt")) {
                    peer = yacyCore.seedDB.get(record.originator());
                    if (peer == null) peername = record.originator(); else peername = peer.getName();
                    prop.put("otherCrawlStartInProgress_" + showedCrawl + "_dark", ((dark) ? 1 : 0));
                    prop.put("otherCrawlStartInProgress_" + showedCrawl + "_cre", record.created());
                    prop.put("otherCrawlStartInProgress_" + showedCrawl + "_peername", wikiTransformer.replaceHTML(peername));
                    prop.put("otherCrawlStartInProgress_" + showedCrawl + "_startURL", wikiTransformer.replaceHTML(record.attributes().get("startURL").toString()));
                    prop.put("otherCrawlStartInProgress_" + showedCrawl + "_intention", wikiTransformer.replaceHTML(record.attributes().get("intention").toString()));
                    prop.put("otherCrawlStartInProgress_" + showedCrawl + "_generalDepth", record.attributes().get("generalDepth"));
                    prop.put("otherCrawlStartInProgress_" + showedCrawl + "_crawlingQ", (record.attributes().get("crawlingQ").equals("true")) ? 1 : 0);
                    showedCrawl++;
                    if (showedCrawl > 20) break;
                }
            }
        } catch (IOException e) {}
        prop.put("otherCrawlStartInProgress", showedCrawl);

        // finished remote crawls
        availableNews = yacyCore.newsPool.size(yacyNewsPool.PROCESSED_DB);
        showedCrawl = 0;
        try {
            for (int c = 0; c < availableNews; c++) {
                record = yacyCore.newsPool.get(yacyNewsPool.PROCESSED_DB, c);
                if (record == null) continue;
                if (record.category().equals("crwlstrt")) {
                    peer = yacyCore.seedDB.get(record.originator());
                    if (peer == null) peername = record.originator(); else peername = peer.getName();
                    prop.put("otherCrawlStartFinished_" + showedCrawl + "_dark", ((dark) ? 1 : 0));
                    prop.put("otherCrawlStartFinished_" + showedCrawl + "_cre", record.created());
                    prop.put("otherCrawlStartFinished_" + showedCrawl + "_peername", wikiTransformer.replaceHTML(peername));
                    prop.put("otherCrawlStartFinished_" + showedCrawl + "_startURL", wikiTransformer.replaceHTML(record.attributes().get("startURL").toString()));
                    prop.put("otherCrawlStartFinished_" + showedCrawl + "_intention", wikiTransformer.replaceHTML(record.attributes().get("intention").toString()));
                    prop.put("otherCrawlStartFinished_" + showedCrawl + "_generalDepth", record.attributes().get("generalDepth"));
                    prop.put("otherCrawlStartFinished_" + showedCrawl + "_crawlingQ", (record.attributes().get("crawlingQ").equals("true")) ? 1 : 0);
                    showedCrawl++;
                    if (showedCrawl > 20) break;
                }
            }
        } catch (IOException e) {}
        prop.put("otherCrawlStartFinished", showedCrawl);

        // remote crawl peers
        if (yacyCore.seedDB == null) {
            //table += "Sorry, cannot show any crawl output now because the system is not completely initialised. Please re-try.";
            prop.put("error", 3);
        } else {
            Enumeration crawlavail = yacyCore.dhtAgent.getAcceptRemoteCrawlSeeds(plasmaURL.dummyHash, true);
            Enumeration crawlpendi = yacyCore.dhtAgent.getAcceptRemoteCrawlSeeds(plasmaURL.dummyHash, false);
            if ((!(crawlavail.hasMoreElements())) && (!(crawlpendi.hasMoreElements()))) {
                prop.put("remoteCrawlPeers", 0); // no peers available
            } else {
                prop.put("remoteCrawlPeers", 1);
                int maxcount = 100;
                int availcount = 0;
                yacySeed seed;
                while ((availcount < maxcount) && (crawlavail.hasMoreElements())) {
                    seed = (yacySeed) crawlavail.nextElement();
                    prop.put("remoteCrawlPeers_available_" + availcount + "_name", seed.getName());
                    prop.put("remoteCrawlPeers_available_" + availcount + "_due", (yacyCore.yacyTime() - seed.available));
                    availcount++;
                }
                prop.put("remoteCrawlPeers_available", availcount);
                int pendicount = 0;
                while ((pendicount < maxcount) && (crawlpendi.hasMoreElements())) {
                    seed = (yacySeed) crawlpendi.nextElement();
                    prop.put("remoteCrawlPeers_busy_" + pendicount + "_name", seed.getName());
                    prop.put("remoteCrawlPeers_busy_" + pendicount + "_due", (yacyCore.yacyTime() - seed.available));
                    pendicount++;
                }
                prop.put("remoteCrawlPeers_busy", pendicount);
                prop.put("remoteCrawlPeers_num", (availcount + pendicount));
            }
        }

        prop.put("crawler-paused", (switchboard.crawlingIsPaused()) ? 0 : 1);

        // return rewrite properties
        return prop;
    }
}