- enhanced performance graph (more info)

- added conditions for rarely used logging lines to prevent unnecessary CPU usage for non-printed info

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@4667 6c8d7289-2bf4-0310-a012-ef5d649a1542
This commit is contained in:
orbiter 2008-04-08 14:44:39 +00:00
parent 696b8ee3f5
commit 14404d31a8
19 changed files with 100 additions and 87 deletions

View File

@ -26,6 +26,7 @@
import de.anomic.http.httpHeader;
import de.anomic.plasma.plasmaProfiling;
import de.anomic.plasma.plasmaSwitchboard;
import de.anomic.server.serverObjects;
import de.anomic.server.serverSwitch;
import de.anomic.ymage.ymageMatrix;
@ -33,14 +34,14 @@ import de.anomic.ymage.ymageMatrix;
public class PerformanceGraph {
public static ymageMatrix respond(httpHeader header, serverObjects post, serverSwitch<?> env) {
//plasmaSwitchboard sb = (plasmaSwitchboard) env;
plasmaSwitchboard sb = (plasmaSwitchboard) env;
if (post == null) post = new serverObjects();
int width = post.getInt("width", 660);
int height = post.getInt("height", 240);
return plasmaProfiling.performanceGraph(width, height);
return plasmaProfiling.performanceGraph(width, height, sb.wordIndex.countURL() + " URLS / " + sb.wordIndex.collectionsSize() + " WORDS IN COLLECTIONS / " + sb.wordIndex.cacheSize() + " WORDS IN CACHE");
}
}

View File

@ -542,7 +542,7 @@ final class memprofiler extends Thread {
public memprofiler(int width, int height, int expectedTimeSeconds, File outputFile) {
this.outputFile = outputFile;
int expectedKilobytes = 20 * 1024;//(Runtime.getRuntime().totalMemory() / 1024);
memChart = new ymageChart(width, height, "FFFFFF", "000000", 50, 20, 20, 20, "MEMORY CHART FROM EXECUTION AT " + new Date());
memChart = new ymageChart(width, height, "FFFFFF", "000000", "000000", 50, 20, 20, 20, "MEMORY CHART FROM EXECUTION AT " + new Date(), null);
int timescale = 10; // steps with each 10 seconds
int memscale = 1024;
memChart.declareDimension(ymageChart.DIMENSION_BOTTOM, timescale, (width - 40) * timescale / expectedTimeSeconds, 0, "FFFFFF", "555555", "SECONDS");

View File

@ -267,7 +267,7 @@ public class kelondroBase64Order extends kelondroAbstractOrder<byte[]> implement
} catch (ArrayIndexOutOfBoundsException e) {
// maybe the input was not base64
// throw new RuntimeException("input probably not base64");
this.log.logFine("wrong string receive: " + in + ", call: " + info);
if (this.log.isFine()) this.log.logFine("wrong string receive: " + in + ", call: " + info);
return new byte[0];
}
}

View File

@ -167,7 +167,7 @@ public class kelondroEcoTable implements kelondroIndex {
System.out.flush();
this.file = new kelondroBufferedEcoFS(new kelondroEcoFS(tablefile, rowdef.objectsize), this.buffersize);
ArrayList<Integer[]> doubles = index.removeDoubles();
assert index.size() + doubles.size() + fail == i;
//assert index.size() + doubles.size() + fail == i;
System.out.println(" -removed " + doubles.size() + " doubles- done.");
if (doubles.size() > 0) {
System.out.println("DEBUG " + tablefile + ": WARNING - EcoTable " + tablefile + " has " + doubles.size() + " doubles");
@ -196,7 +196,7 @@ public class kelondroEcoTable implements kelondroIndex {
}
try {
assert file.size() == index.size() + doubles.size() + fail : "file.size() = " + file.size() + ", index.size() = " + index.size() + ", doubles.size() = " + doubles.size() + ", fail = " + fail + ", i = " + i;
assert file.size() == index.size() + fail : "file.size() = " + file.size() + ", index.size() = " + index.size() + ", doubles.size() = " + doubles.size() + ", fail = " + fail + ", i = " + i;
} catch (IOException e) {
e.printStackTrace();
}

View File

@ -168,15 +168,15 @@ public class plasmaCrawlQueues {
return false;
}
if (sb.sbQueue.size() >= (int) sb.getConfigLong(plasmaSwitchboard.INDEXER_SLOTS, 30)) {
log.logFine("CoreCrawl: too many processes in indexing queue, dismissed (" + "sbQueueSize=" + sb.sbQueue.size() + ")");
if (this.log.isFine()) log.logFine("CoreCrawl: too many processes in indexing queue, dismissed (" + "sbQueueSize=" + sb.sbQueue.size() + ")");
return false;
}
if (this.size() >= sb.getConfigLong(plasmaSwitchboard.CRAWLER_THREADS_ACTIVE_MAX, 10)) {
log.logFine("CoreCrawl: too many processes in loader queue, dismissed (" + "cacheLoader=" + this.size() + ")");
if (this.log.isFine()) log.logFine("CoreCrawl: too many processes in loader queue, dismissed (" + "cacheLoader=" + this.size() + ")");
return false;
}
if (sb.onlineCaution()) {
log.logFine("CoreCrawl: online caution, omitting processing");
if (this.log.isFine()) log.logFine("CoreCrawl: online caution, omitting processing");
return false;
}
@ -218,7 +218,7 @@ public class plasmaCrawlQueues {
return true;
}
log.logFine("LOCALCRAWL: URL=" + urlEntry.url() + ", initiator=" + urlEntry.initiator() + ", crawlOrder=" + ((profile.remoteIndexing()) ? "true" : "false") + ", depth=" + urlEntry.depth() + ", crawlDepth=" + profile.generalDepth() + ", filter=" + profile.generalFilter()
if (this.log.isFine()) log.logFine("LOCALCRAWL: URL=" + urlEntry.url() + ", initiator=" + urlEntry.initiator() + ", crawlOrder=" + ((profile.remoteIndexing()) ? "true" : "false") + ", depth=" + urlEntry.depth() + ", crawlDepth=" + profile.generalDepth() + ", filter=" + profile.generalFilter()
+ ", permission=" + ((yacyCore.seedDB == null) ? "undefined" : (((yacyCore.seedDB.mySeed().isSenior()) || (yacyCore.seedDB.mySeed().isPrincipal())) ? "true" : "false")));
processLocalCrawling(urlEntry, stats);
@ -245,17 +245,17 @@ public class plasmaCrawlQueues {
}
if (sb.sbQueue.size() >= (int) sb.getConfigLong(plasmaSwitchboard.INDEXER_SLOTS, 30)) {
log.logFine("remoteCrawlLoaderJob: too many processes in indexing queue, dismissed (" + "sbQueueSize=" + sb.sbQueue.size() + ")");
if (this.log.isFine()) log.logFine("remoteCrawlLoaderJob: too many processes in indexing queue, dismissed (" + "sbQueueSize=" + sb.sbQueue.size() + ")");
return false;
}
if (this.size() >= sb.getConfigLong(plasmaSwitchboard.CRAWLER_THREADS_ACTIVE_MAX, 10)) {
log.logFine("remoteCrawlLoaderJob: too many processes in loader queue, dismissed (" + "cacheLoader=" + this.size() + ")");
if (this.log.isFine()) log.logFine("remoteCrawlLoaderJob: too many processes in loader queue, dismissed (" + "cacheLoader=" + this.size() + ")");
return false;
}
if (sb.onlineCaution()) {
log.logFine("remoteCrawlLoaderJob: online caution, omitting processing");
if (this.log.isFine()) log.logFine("remoteCrawlLoaderJob: online caution, omitting processing");
return false;
}
@ -360,15 +360,15 @@ public class plasmaCrawlQueues {
return false;
}
if (sb.sbQueue.size() >= (int) sb.getConfigLong(plasmaSwitchboard.INDEXER_SLOTS, 30)) {
log.logFine("GlobalCrawl: too many processes in indexing queue, dismissed (" + "sbQueueSize=" + sb.sbQueue.size() + ")");
if (this.log.isFine()) log.logFine("GlobalCrawl: too many processes in indexing queue, dismissed (" + "sbQueueSize=" + sb.sbQueue.size() + ")");
return false;
}
if (this.size() >= sb.getConfigLong(plasmaSwitchboard.CRAWLER_THREADS_ACTIVE_MAX, 10)) {
log.logFine("GlobalCrawl: too many processes in loader queue, dismissed (" + "cacheLoader=" + this.size() + ")");
if (this.log.isFine()) log.logFine("GlobalCrawl: too many processes in loader queue, dismissed (" + "cacheLoader=" + this.size() + ")");
return false;
}
if (sb.onlineCaution()) {
log.logFine("GlobalCrawl: online caution, omitting processing");
if (this.log.isFine()) log.logFine("GlobalCrawl: online caution, omitting processing");
return false;
}
@ -407,7 +407,7 @@ public class plasmaCrawlQueues {
return true;
}
log.logFine("plasmaSwitchboard.remoteTriggeredCrawlJob: url=" + urlEntry.url() + ", initiator=" + urlEntry.initiator() + ", crawlOrder=" + ((profile.remoteIndexing()) ? "true" : "false") + ", depth=" + urlEntry.depth() + ", crawlDepth=" + profile.generalDepth() + ", filter="
if (this.log.isFine()) log.logFine("plasmaSwitchboard.remoteTriggeredCrawlJob: url=" + urlEntry.url() + ", initiator=" + urlEntry.initiator() + ", crawlOrder=" + ((profile.remoteIndexing()) ? "true" : "false") + ", depth=" + urlEntry.depth() + ", crawlDepth=" + profile.generalDepth() + ", filter="
+ profile.generalFilter() + ", permission=" + ((yacyCore.seedDB == null) ? "undefined" : (((yacyCore.seedDB.mySeed().isSenior()) || (yacyCore.seedDB.mySeed().isPrincipal())) ? "true" : "false")));
processLocalCrawling(urlEntry, stats);
@ -476,7 +476,7 @@ public class plasmaCrawlQueues {
// checking robots.txt for http(s) resources
this.entry.setStatus("worker-checkingrobots");
if ((entry.url().getProtocol().equals("http") || entry.url().getProtocol().equals("https")) && robotsParser.isDisallowed(entry.url())) {
log.logFine("Crawling of URL '" + entry.url().toString() + "' disallowed by robots.txt.");
if (log.isFine()) log.logFine("Crawling of URL '" + entry.url().toString() + "' disallowed by robots.txt.");
plasmaCrawlZURL.Entry eentry = errorURL.newEntry(this.entry.url(), "denied by robots.txt");
eentry.store();
errorURL.push(eentry);

View File

@ -106,11 +106,11 @@ public class dbImportManager {
runningJobs.interrupt();
// we need to use a timeout here because of missing interruptible session threads ...
log.logFine("Waiting for " + runningJobs.activeCount() + " remaining dbImporter threads to finish shutdown ...");
if (log.isFine()) log.logFine("Waiting for " + runningJobs.activeCount() + " remaining dbImporter threads to finish shutdown ...");
for ( int currentThreadIdx = 0; currentThreadIdx < threadCount; currentThreadIdx++ ) {
Thread currentThread = threadList[currentThreadIdx];
if (currentThread.isAlive()) {
log.logFine("Waiting for dbImporter thread '" + currentThread.getName() + "' [" + currentThreadIdx + "] to finish shutdown.");
if (log.isFine()) log.logFine("Waiting for dbImporter thread '" + currentThread.getName() + "' [" + currentThreadIdx + "] to finish shutdown.");
try { currentThread.join(500); } catch (InterruptedException ex) {}
}
}

View File

@ -188,7 +188,7 @@ public final class plasmaCrawlStacker extends Thread {
}
terminateDNSPrefetcher();
this.log.logFine("Shutdown. Closing stackCrawl queue.");
this.log.logInfo("Shutdown. Closing stackCrawl queue.");
// closing the db
this.urlEntryCache.close();
@ -387,7 +387,7 @@ public final class plasmaCrawlStacker extends Thread {
// check if ip is local ip address
if (!sb.acceptURL(entry.url())) {
reason = plasmaCrawlEURL.DENIED_IP_ADDRESS_NOT_IN_DECLARED_DOMAIN + "[" + sb.getConfig("network.unit.domain", "unknown") + "]";
this.log.logFine("Host in URL '" + entry.url().toString() + "' has IP address outside of declared range (" + sb.getConfig("network.unit.domain", "unknown") + "). " +
if (this.log.isFine()) this.log.logFine("Host in URL '" + entry.url().toString() + "' has IP address outside of declared range (" + sb.getConfig("network.unit.domain", "unknown") + "). " +
"Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
@ -395,7 +395,7 @@ public final class plasmaCrawlStacker extends Thread {
// check blacklist
if (plasmaSwitchboard.urlBlacklist.isListed(indexReferenceBlacklist.BLACKLIST_CRAWLER, entry.url())) {
reason = plasmaCrawlEURL.DENIED_URL_IN_BLACKLIST;
this.log.logFine("URL '" + entry.url().toString() + "' is in blacklist. " +
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' is in blacklist. " +
"Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
@ -411,7 +411,7 @@ public final class plasmaCrawlStacker extends Thread {
if ((entry.depth() > 0) && (profile != null) && (!(entry.url().toString().matches(profile.generalFilter())))) {
reason = plasmaCrawlEURL.DENIED_URL_DOES_NOT_MATCH_FILTER;
this.log.logFine("URL '" + entry.url().toString() + "' does not match crawling filter '" + profile.generalFilter() + "'. " +
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' does not match crawling filter '" + profile.generalFilter() + "'. " +
"Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
@ -420,7 +420,7 @@ public final class plasmaCrawlStacker extends Thread {
if (entry.url().isCGI()) {
reason = plasmaCrawlEURL.DENIED_CGI_URL;
this.log.logFine("URL '" + entry.url().toString() + "' is CGI URL. " +
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' is CGI URL. " +
"Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
@ -429,7 +429,7 @@ public final class plasmaCrawlStacker extends Thread {
if ((entry.url().isPOST()) && (profile != null) && (!(profile.crawlingQ()))) {
reason = plasmaCrawlEURL.DENIED_POST_URL;
this.log.logFine("URL '" + entry.url().toString() + "' is post URL. " +
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' is post URL. " +
"Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
@ -444,7 +444,7 @@ public final class plasmaCrawlStacker extends Thread {
// deny urls that do not match with the profile domain list
if (!(profile.grantedDomAppearance(entry.url().getHost()))) {
reason = plasmaCrawlEURL.DENIED_NO_MATCH_WITH_DOMAIN_FILTER;
this.log.logFine("URL '" + entry.url().toString() + "' is not listed in granted domains. " +
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' is not listed in granted domains. " +
"Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
@ -452,7 +452,7 @@ public final class plasmaCrawlStacker extends Thread {
// deny urls that exceed allowed number of occurrences
if (!(profile.grantedDomCount(entry.url().getHost()))) {
reason = plasmaCrawlEURL.DENIED_DOMAIN_COUNT_EXCEEDED;
this.log.logFine("URL '" + entry.url().toString() + "' appeared too often, a maximum of " + profile.domMaxPages() + " is allowed. "+
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' appeared too often, a maximum of " + profile.domMaxPages() + " is allowed. "+
"Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
@ -464,18 +464,18 @@ public final class plasmaCrawlStacker extends Thread {
// do double-check
if ((dbocc != null) && (!recrawl)) {
reason = plasmaCrawlEURL.DOUBLE_REGISTERED + dbocc + ")";
this.log.logFine("URL '" + entry.url().toString() + "' is double registered in '" + dbocc + "'. " + "Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' is double registered in '" + dbocc + "'. " + "Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
if ((oldEntry != null) && (!recrawl)) {
reason = plasmaCrawlEURL.DOUBLE_REGISTERED + "LURL)";
this.log.logFine("URL '" + entry.url().toString() + "' is double registered in 'LURL'. " + "Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
if (this.log.isFine()) this.log.logFine("URL '" + entry.url().toString() + "' is double registered in 'LURL'. " + "Stack processing time: " + (System.currentTimeMillis()-startTime) + "ms");
return reason;
}
// show potential re-crawl
if (recrawl) {
this.log.logFine("RE-CRAWL of URL '" + entry.url().toString() + "': this url was crawled " +
if (this.log.isFine()) this.log.logFine("RE-CRAWL of URL '" + entry.url().toString() + "': this url was crawled " +
((System.currentTimeMillis() - oldEntry.loaddate().getTime()) / 60000 / 60 / 24) + " days ago.");
}

View File

@ -132,13 +132,13 @@ public class plasmaDHTChunk {
this.log = log;
this.wordIndex = wordIndex;
this.startPointHash = selectTransferStart();
log.logFine("Selected hash " + this.startPointHash + " as start point for index distribution, distance = " + yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed().hash, this.startPointHash));
if (this.log.isFine()) log.logFine("Selected hash " + this.startPointHash + " as start point for index distribution, distance = " + yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed().hash, this.startPointHash));
selectTransferContainers(this.startPointHash, minCount, maxCount, maxtime);
// count the indexes, can be smaller than expected
this.idxCount = indexCounter();
if (this.idxCount < minCount) {
log.logFine("Too few (" + this.idxCount + ") indexes selected for transfer.");
if (this.log.isFine()) log.logFine("Too few (" + this.idxCount + ") indexes selected for transfer.");
this.status = chunkStatus_FAILED;
}
} catch (InterruptedException e) {
@ -150,13 +150,13 @@ public class plasmaDHTChunk {
try {
this.log = log;
this.wordIndex = wordIndex;
log.logFine("Demanded hash " + startHash + " as start point for index distribution, distance = " + yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed().hash, this.startPointHash));
if (this.log.isFine()) log.logFine("Demanded hash " + startHash + " as start point for index distribution, distance = " + yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed().hash, this.startPointHash));
selectTransferContainers(startHash, minCount, maxCount, maxtime);
// count the indexes, can be smaller than expected
this.idxCount = indexCounter();
if (this.idxCount < minCount) {
log.logFine("Too few (" + this.idxCount + ") indexes selected for transfer.");
if (this.log.isFine()) log.logFine("Too few (" + this.idxCount + ") indexes selected for transfer.");
this.status = chunkStatus_FAILED;
}
} catch (InterruptedException e) {
@ -194,11 +194,11 @@ public class plasmaDHTChunk {
this.selectionStartTime = System.currentTimeMillis();
int refcountRAM = selectTransferContainersResource(hash, true, maxcount, maxtime);
if (refcountRAM >= mincount) {
log.logFine("DHT selection from RAM: " + refcountRAM + " entries");
if (this.log.isFine()) log.logFine("DHT selection from RAM: " + refcountRAM + " entries");
return;
}
int refcountFile = selectTransferContainersResource(hash, false, maxcount, maxtime);
log.logFine("DHT selection from FILE: " + refcountFile + " entries, RAM provided only " + refcountRAM + " entries");
if (this.log.isFine()) log.logFine("DHT selection from FILE: " + refcountFile + " entries, RAM provided only " + refcountRAM + " entries");
return;
} finally {
this.selectionEndTime = System.currentTimeMillis();
@ -268,7 +268,7 @@ public class plasmaDHTChunk {
}
// use what's left
log.logFine("Selected partial index (" + container.size() + " from " + wholesize + " URLs, " + notBoundCounter + " not bound) for word " + container.getWordHash());
if (this.log.isFine()) log.logFine("Selected partial index (" + container.size() + " from " + wholesize + " URLs, " + notBoundCounter + " not bound) for word " + container.getWordHash());
tmpContainers.add(container);
} catch (kelondroException e) {
log.logSevere("plasmaWordIndexDistribution/2: deleted DB for word " + container.getWordHash(), e);
@ -279,7 +279,7 @@ public class plasmaDHTChunk {
indexContainers = (indexContainer[]) tmpContainers.toArray(new indexContainer[tmpContainers.size()]);
//[C[16GwGuFzwffp] has 1 entries, C[16hGKMAl0w97] has 9 entries, C[17A8cDPF6SfG] has 9 entries, C[17Kdj__WWnUy] has 1 entries, C[1
if ((indexContainers == null) || (indexContainers.length == 0)) {
log.logFine("No index available for index transfer, hash start-point " + startPointHash);
if (this.log.isFine()) log.logFine("No index available for index transfer, hash start-point " + startPointHash);
this.status = chunkStatus_FAILED;
return 0;
}
@ -304,7 +304,7 @@ public class plasmaDHTChunk {
for (int i = 0; i < this.indexContainers.length; i++) {
// delete entries separately
if (this.indexContainers[i] == null) {
log.logFine("Deletion of partial index #" + i + " not possible, entry is null");
if (this.log.isFine()) log.logFine("Deletion of partial index #" + i + " not possible, entry is null");
continue;
}
int c = this.indexContainers[i].size();
@ -317,7 +317,7 @@ public class plasmaDHTChunk {
String wordHash = indexContainers[i].getWordHash();
count = wordIndex.removeEntriesExpl(this.indexContainers[i].getWordHash(), urlHashes);
if (log.isFine())
log.logFine("Deleted partial index (" + c + " URLs) for word " + wordHash + "; " + this.wordIndex.indexSize(wordHash) + " entries left");
if (this.log.isFine()) log.logFine("Deleted partial index (" + c + " URLs) for word " + wordHash + "; " + this.wordIndex.indexSize(wordHash) + " entries left");
this.indexContainers[i] = null;
}
return count;

View File

@ -154,7 +154,7 @@ public class plasmaDHTFlush extends Thread {
plasmaDHTChunk newDHTChunk = null, oldDHTChunk = null;
try {
// initial starting point of index transfer is "AAAAAAAAAAAA"
this.log.logFine("Selected hash " + this.startPointHash + " as start point for index distribution of whole index");
if (this.log.isFine()) this.log.logFine("Selected hash " + this.startPointHash + " as start point for index distribution of whole index");
/* Loop until we have
* - finished transfer of whole index
@ -181,7 +181,7 @@ public class plasmaDHTFlush extends Thread {
this.startPointHash = "AAAAAAAAAAAA";
} else {
// otherwise we could end transfer now
this.log.logFine("No index available for index transfer, hash start-point " + this.startPointHash);
if (this.log.isFine()) this.log.logFine("No index available for index transfer, hash start-point " + this.startPointHash);
this.status = "Finished. " + iteration + " chunks transfered.";
this.finished = true;
}
@ -223,7 +223,7 @@ public class plasmaDHTFlush extends Thread {
if (this.delete) {
this.status = "Running: Deleting chunk " + iteration;
String urlReferences = oldDHTChunk.deleteTransferIndexes();
this.log.logFine("Deleted from " + oldDHTChunk.containerSize() + " transferred RWIs locally " + urlReferences + " URL references");
if (this.log.isFine()) this.log.logFine("Deleted from " + oldDHTChunk.containerSize() + " transferred RWIs locally " + urlReferences + " URL references");
}
oldDHTChunk = null;
}

View File

@ -407,7 +407,7 @@ public final class plasmaHTCache {
if (deleteFileandDirs(getCachePath(url), "FROM")) {
try {
// As the file is gone, the entry in responseHeader.db is not needed anymore
log.logFinest("Trying to remove responseHeader from URL: " + url.toNormalform(false, true));
if (log.isFinest()) log.logFinest("Trying to remove responseHeader from URL: " + url.toNormalform(false, true));
responseHeaderDB.remove(url.hash());
} catch (IOException e) {
resetResponseHeaderDB();
@ -436,7 +436,7 @@ public final class plasmaHTCache {
// If the directory has been emptied, remove it
// Loop as long as we produce empty directories, but stop at HTCACHE
while ((!(obj.equals(cachePath))) && (obj.isDirectory()) && (obj.list().length == 0)) {
if (obj.delete()) log.logFine("DELETED EMPTY DIRECTORY : " + obj.toString());
if (obj.delete()) if (log.isFine()) log.logFine("DELETED EMPTY DIRECTORY : " + obj.toString());
obj = obj.getParentFile();
}
return true;
@ -458,19 +458,19 @@ public final class plasmaHTCache {
if (System.currentTimeMillis() - t < 300000) break; // files must have been at least 5 minutes in the cache before they are deleted
if (file != null) {
if (filesInUse.contains(file)) continue;
log.logFinest("Trying to delete [" + key + "] = old file: " + file.toString());
if (log.isFinest()) log.logFinest("Trying to delete [" + key + "] = old file: " + file.toString());
// This needs to be called *before* the file is deleted
String urlHash = getHash(file);
if (deleteFileandDirs(file, "OLD")) {
try {
// As the file is gone, the entry in responseHeader.db is not needed anymore
if (urlHash != null) {
log.logFinest("Trying to remove responseHeader for URLhash: " + urlHash);
if (log.isFinest()) log.logFinest("Trying to remove responseHeader for URLhash: " + urlHash);
responseHeaderDB.remove(urlHash);
} else {
yacyURL url = getURL(file);
if (url != null) {
log.logFinest("Trying to remove responseHeader for URL: " + url.toNormalform(false, true));
if (log.isFinest()) log.logFinest("Trying to remove responseHeader for URL: " + url.toNormalform(false, true));
responseHeaderDB.remove(url.hash());
}
}
@ -676,13 +676,13 @@ public final class plasmaHTCache {
cacheAge.put(ageString(d, newpath), newpath);
File obj = oldpath.getParentFile();
while ((!(obj.equals(cachePath))) && (obj.isDirectory()) && (obj.list().length == 0)) {
if (obj.delete()) log.logFine("DELETED EMPTY DIRECTORY : " + obj.toString());
if (obj.delete()) if (log.isFine()) log.logFine("DELETED EMPTY DIRECTORY : " + obj.toString());
obj = obj.getParentFile();
}
}
}
} catch (Exception e) {
log.logFine("moveCachedObject('" + oldpath.toString() + "','" +
if (log.isFine()) log.logFine("moveCachedObject('" + oldpath.toString() + "','" +
newpath.toString() + "')", e);
}
}

View File

@ -63,7 +63,7 @@ public class plasmaProfiling {
return max;
}
public static ymageMatrix performanceGraph(int width, int height) {
public static ymageMatrix performanceGraph(int width, int height, String subline) {
// find maximum values for automatic graph dimension adoption
int maxppm = (int) maxPayload("ppm", 25);
long maxbytes = maxPayload("memory", 110 * 1024 * 1024);
@ -73,13 +73,13 @@ public class plasmaProfiling {
int rightborder = 30;
int topborder = 20;
int bottomborder = 20;
int leftscale = 20;
int leftscale = 50;
int rightscale = 100;
int bottomscale = 60;
int vspace = height - topborder - bottomborder;
int hspace = width - leftborder - rightborder;
int maxtime = 600;
ymageChart chart = new ymageChart(width, height, "FFFFFF", "000000", leftborder, rightborder, topborder, bottomborder, "PEER PERFORMANCE GRAPH: PAGES/MINUTE and USED MEMORY");
ymageChart chart = new ymageChart(width, height, "FFFFFF", "000000", "AAAAAA", leftborder, rightborder, topborder, bottomborder, "PEER PERFORMANCE GRAPH: PAGES/MINUTE and USED MEMORY", subline);
chart.declareDimension(ymageChart.DIMENSION_BOTTOM, bottomscale, hspace / (maxtime / bottomscale), -maxtime, "000000", "CCCCCC", "TIME/SECONDS");
chart.declareDimension(ymageChart.DIMENSION_LEFT, leftscale, vspace * leftscale / maxppm, 0, "008800", null , "PPM [PAGES/MINUTE]");
chart.declareDimension(ymageChart.DIMENSION_RIGHT, rightscale, vspace * rightscale / (int)(maxbytes / 1024 / 1024), 0, "0000FF", "CCCCCC", "MEMORY/MEGABYTE");

View File

@ -1631,7 +1631,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
* check if ip is local ip address // TODO: remove this protocol specific code here
* ========================================================================= */
if (!acceptURL(entry.url())) {
this.log.logFine("Host in URL '" + entry.url() + "' is not in defined indexing domain.");
if (this.log.isFine()) this.log.logFine("Host in URL '" + entry.url() + "' is not in defined indexing domain.");
doIndexing = false;
}
@ -1656,9 +1656,9 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
String error = entry.shallStoreCacheForProxy();
if (error == null) {
plasmaHTCache.writeResourceContent(entry.url(), entry.cacheArray());
this.log.logFine("WROTE FILE (" + entry.cacheArray().length + " bytes) for " + entry.cacheFile());
if (this.log.isFine()) this.log.logFine("WROTE FILE (" + entry.cacheArray().length + " bytes) for " + entry.cacheFile());
} else {
this.log.logFine("WRITE OF FILE " + entry.cacheFile() + " FORBIDDEN: " + error);
if (this.log.isFine()) this.log.logFine("WRITE OF FILE " + entry.cacheFile() + " FORBIDDEN: " + error);
}
}
}
@ -1770,7 +1770,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
plasmaSwitchboardQueue.QueueEntry nextentry = null;
synchronized (sbQueue) {
// do one processing step
log.logFine("DEQUEUE: sbQueueSize=" + sbQueue.size() +
if (this.log.isFine()) log.logFine("DEQUEUE: sbQueueSize=" + sbQueue.size() +
", coreStackSize=" + crawlQueues.noticeURL.stackSize(plasmaCrawlNURL.STACK_TYPE_CORE) +
", limitStackSize=" + crawlQueues.noticeURL.stackSize(plasmaCrawlNURL.STACK_TYPE_LIMIT) +
", overhangStackSize=" + crawlQueues.noticeURL.stackSize(plasmaCrawlNURL.STACK_TYPE_OVERHANG) +
@ -1800,7 +1800,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
try {
// work off fresh entries from the proxy or from the crawler
if (onlineCaution()) {
log.logFine("deQueue: online caution, omitting resource stack processing");
if (this.log.isFine()) log.logFine("deQueue: online caution, omitting resource stack processing");
return false;
}
@ -1814,7 +1814,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
// possibly delete entries from last chunk
if ((this.dhtTransferChunk != null) && (this.dhtTransferChunk.getStatus() == plasmaDHTChunk.chunkStatus_COMPLETE)) {
String deletedURLs = this.dhtTransferChunk.deleteTransferIndexes();
this.log.logFine("Deleted from " + this.dhtTransferChunk.containers().length + " transferred RWIs locally, removed " + deletedURLs + " URL references");
if (this.log.isFine()) this.log.logFine("Deleted from " + this.dhtTransferChunk.containers().length + " transferred RWIs locally, removed " + deletedURLs + " URL references");
this.dhtTransferChunk = null;
}
@ -1841,13 +1841,13 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
}
if (crawlStacker.size() >= getConfigLong(CRAWLSTACK_SLOTS, 2000)) {
log.logFine("deQueue: too many processes in stack crawl thread queue (" + "stackCrawlQueue=" + crawlStacker.size() + ")");
if (this.log.isFine()) log.logFine("deQueue: too many processes in stack crawl thread queue (" + "stackCrawlQueue=" + crawlStacker.size() + ")");
return doneSomething;
}
// if we were interrupted we should return now
if (Thread.currentThread().isInterrupted()) {
log.logFine("deQueue: thread was interrupted");
if (this.log.isFine()) log.logFine("deQueue: thread was interrupted");
return false;
}
@ -1932,7 +1932,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
// clean up delegated stack
checkInterruption();
if ((crawlQueues.delegatedURL.stackSize() > 1000)) {
log.logFine("Cleaning Delegated-URLs report stack, " + crawlQueues.delegatedURL.stackSize() + " entries on stack");
if (this.log.isFine()) log.logFine("Cleaning Delegated-URLs report stack, " + crawlQueues.delegatedURL.stackSize() + " entries on stack");
crawlQueues.delegatedURL.clearStack();
hasDoneSomething = true;
}
@ -1940,7 +1940,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
// clean up error stack
checkInterruption();
if ((crawlQueues.errorURL.stackSize() > 1000)) {
log.logFine("Cleaning Error-URLs report stack, " + crawlQueues.errorURL.stackSize() + " entries on stack");
if (this.log.isFine()) log.logFine("Cleaning Error-URLs report stack, " + crawlQueues.errorURL.stackSize() + " entries on stack");
crawlQueues.errorURL.clearStack();
hasDoneSomething = true;
}
@ -1949,7 +1949,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
for (int i = 1; i <= 6; i++) {
checkInterruption();
if (crawlResults.getStackSize(i) > 1000) {
log.logFine("Cleaning Loaded-URLs report stack, " + crawlResults.getStackSize(i) + " entries on stack " + i);
if (this.log.isFine()) log.logFine("Cleaning Loaded-URLs report stack, " + crawlResults.getStackSize(i) + " entries on stack " + i);
crawlResults.clearStack(i);
hasDoneSomething = true;
}
@ -1962,7 +1962,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
// clean up news
checkInterruption();
try {
log.logFine("Cleaning Incoming News, " + yacyCore.newsPool.size(yacyNewsPool.INCOMING_DB) + " entries on stack");
if (this.log.isFine()) log.logFine("Cleaning Incoming News, " + yacyCore.newsPool.size(yacyNewsPool.INCOMING_DB) + " entries on stack");
if (yacyCore.newsPool.automaticProcess() > 0) hasDoneSomething = true;
} catch (IOException e) {}
if (getConfigBool("cleanup.deletionProcessedNews", true)) {
@ -2134,7 +2134,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
plasmaParserDocument document = null;
int processCase = entry.processCase();
log.logFine("processResourceStack processCase=" + processCase +
if (this.log.isFine()) log.logFine("processResourceStack processCase=" + processCase +
", depth=" + entry.depth() +
", maxDepth=" + ((entry.profile() == null) ? "null" : Integer.toString(entry.profile().generalDepth())) +
", filter=" + ((entry.profile() == null) ? "null" : entry.profile().generalFilter()) +
@ -2247,7 +2247,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
// strip out words
checkInterruption();
log.logFine("Condensing for '" + entry.url().toNormalform(false, true) + "'");
if (this.log.isFine()) log.logFine("Condensing for '" + entry.url().toNormalform(false, true) + "'");
plasmaCondenser condenser;
try {
condenser = new plasmaCondenser(document, entry.profile().indexText(), entry.profile().indexMedia());
@ -2289,7 +2289,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
try {
newEntry = wordIndex.storeDocument(entry, document, condenser);
} catch (IOException e) {
log.logFine("Not Indexed Resource '" + entry.url().toNormalform(false, true) + "': process case=" + processCase);
if (this.log.isFine()) log.logFine("Not Indexed Resource '" + entry.url().toNormalform(false, true) + "': process case=" + processCase);
addURLtoErrorDB(entry.url(), referrerURL.hash(), entry.initiator(), dc_title, "error storing url: " + e.getMessage(), new kelondroBitfield());
return;
}
@ -2304,7 +2304,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
// STORE WORD INDEX
if ((!entry.profile().indexText()) && (!entry.profile().indexMedia())) {
log.logFine("Not Indexed Resource '" + entry.url().toNormalform(false, true) + "': process case=" + processCase);
if (this.log.isFine()) log.logFine("Not Indexed Resource '" + entry.url().toNormalform(false, true) + "': process case=" + processCase);
addURLtoErrorDB(entry.url(), referrerURL.hash(), entry.initiator(), dc_title, plasmaCrawlEURL.DENIED_UNKNOWN_INDEXING_PROCESS_CASE, new kelondroBitfield());
return;
}
@ -2553,15 +2553,15 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
public boolean dhtTransferJob() {
String rejectReason = dhtShallTransfer();
if (rejectReason != null) {
log.logFine(rejectReason);
if (this.log.isFine()) log.logFine(rejectReason);
return false;
}
if (this.dhtTransferChunk == null) {
log.logFine("no DHT distribution: no transfer chunk defined");
if (this.log.isFine()) log.logFine("no DHT distribution: no transfer chunk defined");
return false;
}
if ((this.dhtTransferChunk != null) && (this.dhtTransferChunk.getStatus() != plasmaDHTChunk.chunkStatus_FILLED)) {
log.logFine("no DHT distribution: index distribution is in progress, status=" + this.dhtTransferChunk.getStatus());
if (this.log.isFine()) log.logFine("no DHT distribution: index distribution is in progress, status=" + this.dhtTransferChunk.getStatus());
return false;
}
@ -2575,7 +2575,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
if (ok) {
dhtTransferChunk.setStatus(plasmaDHTChunk.chunkStatus_COMPLETE);
log.logFine("DHT distribution: transfer COMPLETE");
if (this.log.isFine()) log.logFine("DHT distribution: transfer COMPLETE");
// adopt transfer count
if ((System.currentTimeMillis() - starttime) > (10000 * peerCount)) {
dhtTransferIndexCount--;
@ -2595,11 +2595,11 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<plasmaSwitchbo
if (dhtTransferChunk.getTransferFailedCounter() >= maxChunkFails) {
//System.out.println("DEBUG: " + dhtTransferChunk.getTransferFailedCounter() + " of " + maxChunkFails + " sendings failed for this chunk, aborting!");
dhtTransferChunk.setStatus(plasmaDHTChunk.chunkStatus_FAILED);
log.logFine("DHT distribution: transfer FAILED");
if (this.log.isFine()) log.logFine("DHT distribution: transfer FAILED");
}
else {
//System.out.println("DEBUG: " + dhtTransferChunk.getTransferFailedCounter() + " of " + maxChunkFails + " sendings failed for this chunk, retrying!");
log.logFine("DHT distribution: transfer FAILED, sending this chunk again");
if (this.log.isFine()) log.logFine("DHT distribution: transfer FAILED, sending this chunk again");
}
return false;
}

View File

@ -165,7 +165,7 @@ public class plasmaWebStructure {
crg.insert(0, header.toString());
try {
serverFileUtils.writeAndGZip(crg.toString().getBytes(), file);
log.logFine("wrote citation reference dump " + file.toString());
if (this.log.isFine()) log.logFine("wrote citation reference dump " + file.toString());
} catch (IOException e) {
e.printStackTrace();
}

View File

@ -484,6 +484,14 @@ public final class plasmaWordIndex implements indexRI {
return java.lang.Math.max(collections.size(), java.lang.Math.max(dhtInCache.size(), dhtOutCache.size()));
}
public int collectionsSize() {
return collections.size();
}
public int cacheSize() {
return dhtInCache.size() + dhtOutCache.size();
}
public int indexSize(String wordHash) {
int size = 0;
size += dhtInCache.indexSize(wordHash);

View File

@ -110,7 +110,7 @@ public class serverPortForwardingSch implements serverPortForwarding{
int localPort
) throws Exception {
try {
this.log.logFine("Initializing port forwarding via sch ...");
if (this.log.isFine()) this.log.logFine("Initializing port forwarding via sch ...");
this.switchboard = switchboard;
@ -205,7 +205,7 @@ public class serverPortForwardingSch implements serverPortForwarding{
public synchronized boolean reconnect() throws IOException {
if ((!this.isConnected()) && (!Thread.currentThread().isInterrupted())) {
this.log.logFine("Trying to reconnect to port forwarding host.");
if (this.log.isFine()) this.log.logFine("Trying to reconnect to port forwarding host.");
this.disconnect();
this.connect();
return this.isConnected();

View File

@ -207,7 +207,7 @@ public abstract class serverAbstractBusyThread extends serverAbstractThread impl
if (this.syncObject != null) {
synchronized (this.syncObject) {
if (this.log != null)
this.log.logFine("thread '" + this.getName()
if (this.log.isFine()) this.log.logFine("thread '" + this.getName()
+ "' has received a notification from thread '"
+ Thread.currentThread().getName() + "'.");
this.syncObject.notifyAll();

View File

@ -300,7 +300,7 @@ public final class serverCore extends serverAbstractBusyThread implements server
if (bindIP.startsWith("#")) {
String interfaceName = bindIP.substring(1);
String hostName = null;
this.log.logFine("Trying to determine IP address of interface '" + interfaceName + "'.");
if (this.log.isFine()) this.log.logFine("Trying to determine IP address of interface '" + interfaceName + "'.");
Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
if (interfaces != null) {
@ -402,7 +402,7 @@ public final class serverCore extends serverAbstractBusyThread implements server
// prepare for new connection
// idleThreadCheck();
this.switchboard.handleBusyState(this.busySessions.size());
this.log.logFinest("* waiting for connections, " + this.busySessions.size() + " sessions running");
if (log.isFinest()) this.log.logFinest("* waiting for connections, " + this.busySessions.size() + " sessions running");
announceThreadBlockApply();
@ -638,7 +638,7 @@ public final class serverCore extends serverAbstractBusyThread implements server
*/
public void log(boolean outgoing, String request) {
serverCore.this.log.logFine(this.userAddress.getHostAddress() + "/" + this.identity + " " +
if (log.isFine()) log.logFine(this.userAddress.getHostAddress() + "/" + this.identity + " " +
"[" + ((busySessions == null)? -1 : busySessions.size()) + ", " + this.commandCounter +
((outgoing) ? "] > " : "] < ") +
request);

View File

@ -54,7 +54,7 @@ public class serverMemory {
lastGC = System.currentTimeMillis();
if (log.isFine()) log.logInfo("[gc] before: " + bytesToString(free) + ", after: " + bytesToString(free()) + ", call: " + info);
} else if (log.isFine()) {
log.logFinest("[gc] no execute, last run: " + (elapsed / 1000) + " seconds ago, call: " + info);
if (log.isFinest()) log.logFinest("[gc] no execute, last run: " + (elapsed / 1000) + " seconds ago, call: " + info);
}
}

View File

@ -67,9 +67,9 @@ public class ymageChart extends ymageMatrix {
String name;
String backgroundColor, foregroundColor;
public ymageChart(int width, int height, String backgroundColor, String foregroundColor,
public ymageChart(int width, int height, String backgroundColor, String foregroundColor, String lightColor,
int leftborder, int rightborder, int topborder, int bottomborder,
String name) {
String name, String subline) {
super(width, height, ymageMatrix.MODE_REPLACE, backgroundColor);
this.leftborder = leftborder;
this.rightborder = rightborder;
@ -82,6 +82,10 @@ public class ymageChart extends ymageMatrix {
this.setColor(foregroundColor);
ymageToolPrint.print(this, width / 2 - name.length() * 3, 6, 0, name, -1);
}
if (subline != null) {
this.setColor(lightColor);
ymageToolPrint.print(this, width / 2 - subline.length() * 3, 14, 0, subline, -1);
}
}
public void declareDimension(int dimensionType, int scale, int pixelperscale, int offset, String colorNaming, String colorScale, String name) {
@ -166,7 +170,7 @@ public class ymageChart extends ymageMatrix {
String scale = (invers) ? "333333" : "CCCCCC";
String green = (invers) ? "008800" : "008800";
String blue = (invers) ? "0000FF" : "0000FF";
ymageChart ip = new ymageChart(660, 240, bg, fg, 30, 30, 20, 20, "PEER PERFORMANCE GRAPH: PAGES/MINUTE and USED MEMORY");
ymageChart ip = new ymageChart(660, 240, bg, fg, fg, 30, 30, 20, 20, "PEER PERFORMANCE GRAPH: PAGES/MINUTE and USED MEMORY", "");
ip.declareDimension(DIMENSION_BOTTOM, 60, 60, -600, fg, scale, "TIME/SECONDS");
//ip.declareDimension(DIMENSION_TOP, 10, 40, "000000", null, "count");
ip.declareDimension(DIMENSION_LEFT, 50, 40, 0, green, scale , "PPM [PAGES/MINUTE]");