2009-03-02 00:58:14 +01:00
|
|
|
// ReverseIndexCache.java
|
2007-08-03 13:44:58 +02:00
|
|
|
// (C) 2005, 2006 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
|
|
|
|
// first published 2005 on http://yacy.net
|
2005-11-04 14:41:51 +01:00
|
|
|
//
|
2006-05-28 13:44:50 +02:00
|
|
|
// This is a part of YaCy, a peer-to-peer based web search engine
|
2005-05-17 10:25:04 +02:00
|
|
|
//
|
replaced old DHT transmission method with new method. Many things have changed! some of them:
- after a index selection is made, the index is splitted into its vertical components
- from differrent index selctions the splitted components can be accumulated before they are placed into the transmission queue
- each splitted chunk gets its own transmission thread
- multiple transmission threads are started concurrently
- the process can be monitored with the blocking queue servlet
To implement that, a new package de.anomic.yacy.dht was created. Some old files have been removed.
The new index distribution model using a vertical DHT was implemented. An abstraction of this model
is implemented in the new dht package as interface. The freeworld network has now a configuration
of two vertial partitions; sixteen partitions are planned and will be configured if the process is bug-free.
This modification has three main targets:
- enhance the DHT transmission speed
- with a vertical DHT, a search will speed up. With two partitions, two times. With sixteen, sixteen times.
- the vertical DHT will apply a semi-dht for URLs, and peers will receive a fraction of the overall URLs they received before.
with two partitions, the fractions will be halve. With sixteen partitions, a 1/16 of the previous number of URLs.
BE CAREFULL, THIS IS A MAJOR CODE CHANGE, POSSIBLY FULL OF BUGS AND HARMFUL THINGS.
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@5586 6c8d7289-2bf4-0310-a012-ef5d649a1542
2009-02-10 01:06:59 +01:00
|
|
|
// $LastChangedDate: 2009-01-31 00:33:47 +0100 (Sa, 31 Jan 2009) $
|
|
|
|
// $LastChangedRevision: 5544 $
|
|
|
|
// $LastChangedBy: orbiter $
|
2006-05-28 13:44:50 +02:00
|
|
|
//
|
|
|
|
// LICENSE
|
|
|
|
//
|
2005-05-17 10:25:04 +02:00
|
|
|
// This program is free software; you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation; either version 2 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
|
|
// along with this program; if not, write to the Free Software
|
|
|
|
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
|
|
|
2009-03-02 11:00:32 +01:00
|
|
|
package de.anomic.kelondro.text;
|
2005-05-17 10:25:04 +02:00
|
|
|
|
2005-07-06 16:48:41 +02:00
|
|
|
import java.io.File;
|
|
|
|
import java.io.IOException;
|
2008-06-18 01:56:39 +02:00
|
|
|
import java.util.ArrayList;
|
2009-01-21 19:23:37 +01:00
|
|
|
import java.util.Iterator;
|
2006-08-01 12:30:55 +02:00
|
|
|
import java.util.Set;
|
2006-05-26 11:32:50 +02:00
|
|
|
|
2009-01-30 16:33:00 +01:00
|
|
|
import de.anomic.kelondro.index.Row;
|
2009-01-30 23:08:08 +01:00
|
|
|
import de.anomic.kelondro.order.CloneableIterator;
|
2009-01-30 23:44:20 +01:00
|
|
|
import de.anomic.kelondro.util.MemoryControl;
|
|
|
|
import de.anomic.kelondro.util.ScoreCluster;
|
2009-01-31 00:33:47 +01:00
|
|
|
import de.anomic.kelondro.util.Log;
|
2005-05-17 10:25:04 +02:00
|
|
|
|
2009-03-02 11:00:32 +01:00
|
|
|
public final class IndexCache implements Index, IndexReader, Iterable<ReferenceContainer> {
|
2005-11-04 14:41:51 +01:00
|
|
|
|
2005-05-17 10:25:04 +02:00
|
|
|
// class variables
|
2009-01-30 16:33:00 +01:00
|
|
|
private final ScoreCluster<String> hashScore;
|
|
|
|
private final ScoreCluster<String> hashDate;
|
2006-08-16 21:49:31 +02:00
|
|
|
private long initTime;
|
2008-05-20 11:29:01 +02:00
|
|
|
private int cacheEntityMaxCount; // the maximum number of cache slots for RWI entries
|
|
|
|
public int cacheReferenceCountLimit; // the maximum number of references to a single RWI entity
|
|
|
|
public long cacheReferenceAgeLimit; // the maximum age (= time not changed) of a RWI entity
|
2009-01-31 00:33:47 +01:00
|
|
|
private final Log log;
|
2009-01-22 01:03:54 +01:00
|
|
|
private final File dumpFile;
|
2009-03-02 12:04:13 +01:00
|
|
|
private ReferenceContainerCache heap;
|
2006-08-10 23:21:50 +02:00
|
|
|
|
2008-04-06 22:31:16 +02:00
|
|
|
@SuppressWarnings("unchecked")
|
2009-03-02 11:00:32 +01:00
|
|
|
public IndexCache(
|
2009-01-01 23:31:16 +01:00
|
|
|
final File databaseRoot,
|
2009-01-30 16:33:00 +01:00
|
|
|
final Row payloadrow,
|
2009-01-01 23:31:16 +01:00
|
|
|
final int entityCacheMaxSize,
|
|
|
|
final int wCacheReferenceCountLimitInit,
|
|
|
|
final long wCacheReferenceAgeLimitInit,
|
|
|
|
final String newHeapName,
|
2009-01-31 00:33:47 +01:00
|
|
|
final Log log) {
|
2005-11-04 14:41:51 +01:00
|
|
|
|
2005-05-17 10:25:04 +02:00
|
|
|
// creates a new index cache
|
|
|
|
// the cache has a back-end where indexes that do not fit in the cache are flushed
|
2009-01-30 16:33:00 +01:00
|
|
|
this.hashScore = new ScoreCluster<String>();
|
|
|
|
this.hashDate = new ScoreCluster<String>();
|
2006-08-16 21:49:31 +02:00
|
|
|
this.initTime = System.currentTimeMillis();
|
2008-05-20 11:29:01 +02:00
|
|
|
this.cacheEntityMaxCount = entityCacheMaxSize;
|
2007-03-03 01:55:51 +01:00
|
|
|
this.cacheReferenceCountLimit = wCacheReferenceCountLimitInit;
|
|
|
|
this.cacheReferenceAgeLimit = wCacheReferenceAgeLimitInit;
|
2005-05-17 10:25:04 +02:00
|
|
|
this.log = log;
|
2009-01-22 01:03:54 +01:00
|
|
|
this.dumpFile = new File(databaseRoot, newHeapName);
|
2009-03-02 12:04:13 +01:00
|
|
|
this.heap = new ReferenceContainerCache(payloadrow);
|
2006-02-14 01:12:07 +01:00
|
|
|
|
2005-05-17 10:25:04 +02:00
|
|
|
// read in dump of last session
|
2009-01-01 23:31:16 +01:00
|
|
|
boolean initFailed = false;
|
2009-01-22 01:03:54 +01:00
|
|
|
if (dumpFile.exists()) try {
|
|
|
|
heap.initWriteModeFromBLOB(dumpFile);
|
2009-01-01 23:31:16 +01:00
|
|
|
} catch (IOException e) {
|
|
|
|
initFailed = true;
|
|
|
|
e.printStackTrace();
|
|
|
|
}
|
|
|
|
if (initFailed) {
|
|
|
|
log.logSevere("unable to restore cache dump");
|
|
|
|
// get empty dump
|
|
|
|
heap.initWriteMode();
|
2009-01-22 01:03:54 +01:00
|
|
|
} else if (dumpFile.exists()) {
|
2009-01-01 23:31:16 +01:00
|
|
|
// initialize scores for cache organization
|
2009-03-02 00:58:14 +01:00
|
|
|
for (final ReferenceContainer ic : (Iterable<ReferenceContainer>) heap.referenceIterator(null, false, true)) {
|
2009-01-01 23:31:16 +01:00
|
|
|
this.hashDate.setScore(ic.getWordHash(), intTime(ic.lastWrote()));
|
|
|
|
this.hashScore.setScore(ic.getWordHash(), ic.size());
|
2008-04-02 15:18:23 +02:00
|
|
|
}
|
2008-04-07 15:12:58 +02:00
|
|
|
} else {
|
|
|
|
heap.initWriteMode();
|
2005-05-17 10:25:04 +02:00
|
|
|
}
|
|
|
|
}
|
2008-05-24 14:30:50 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* clear the content
|
|
|
|
* @throws IOException
|
|
|
|
*/
|
|
|
|
public void clear() {
|
|
|
|
hashScore.clear();
|
|
|
|
hashDate.clear();
|
|
|
|
initTime = System.currentTimeMillis();
|
2009-01-06 10:38:08 +01:00
|
|
|
heap.clear();
|
2008-05-24 14:30:50 +02:00
|
|
|
}
|
2005-05-17 10:25:04 +02:00
|
|
|
|
2006-12-22 13:54:56 +01:00
|
|
|
public int minMem() {
|
2007-08-20 19:36:43 +02:00
|
|
|
// there is no specific large array that needs to be maintained
|
|
|
|
// this value is just a guess of the possible overhead
|
|
|
|
return 100 * 1024; // 100 kb
|
2006-12-22 13:54:56 +01:00
|
|
|
}
|
2006-11-05 03:10:40 +01:00
|
|
|
|
2005-05-17 10:25:04 +02:00
|
|
|
// cache settings
|
2006-09-14 02:51:02 +02:00
|
|
|
public int maxURLinCache() {
|
2006-03-10 14:57:30 +01:00
|
|
|
if (hashScore.size() == 0) return 0;
|
2006-02-25 22:05:19 +01:00
|
|
|
return hashScore.getMaxScore();
|
|
|
|
}
|
|
|
|
|
2006-09-14 02:51:02 +02:00
|
|
|
public long minAgeOfCache() {
|
2006-03-10 14:57:30 +01:00
|
|
|
if (hashDate.size() == 0) return 0;
|
2006-03-09 12:31:17 +01:00
|
|
|
return System.currentTimeMillis() - longEmit(hashDate.getMaxScore());
|
|
|
|
}
|
|
|
|
|
2006-09-14 02:51:02 +02:00
|
|
|
public long maxAgeOfCache() {
|
2006-03-10 14:57:30 +01:00
|
|
|
if (hashDate.size() == 0) return 0;
|
2006-02-25 22:05:19 +01:00
|
|
|
return System.currentTimeMillis() - longEmit(hashDate.getMinScore());
|
2005-05-17 10:25:04 +02:00
|
|
|
}
|
|
|
|
|
2008-08-02 14:12:04 +02:00
|
|
|
public void setMaxWordCount(final int maxWords) {
|
2008-05-20 11:29:01 +02:00
|
|
|
this.cacheEntityMaxCount = maxWords;
|
2006-03-13 11:43:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
public int getMaxWordCount() {
|
2008-05-20 11:29:01 +02:00
|
|
|
return this.cacheEntityMaxCount;
|
2005-09-20 12:10:34 +02:00
|
|
|
}
|
2006-02-14 01:12:07 +01:00
|
|
|
|
2007-03-18 20:45:23 +01:00
|
|
|
public int size() {
|
2009-02-10 01:48:54 +01:00
|
|
|
if (heap == null) return 0;
|
2008-04-06 22:31:16 +02:00
|
|
|
return heap.size();
|
2006-03-15 17:01:42 +01:00
|
|
|
}
|
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public synchronized CloneableIterator<ReferenceContainer> referenceIterator(final String startWordHash, final boolean rot, final boolean ram) {
|
2006-08-01 12:30:55 +02:00
|
|
|
// we return an iterator object that creates top-level-clones of the indexContainers
|
|
|
|
// in the cache, so that manipulations of the iterated objects do not change
|
|
|
|
// objects in the cache.
|
replaced old DHT transmission method with new method. Many things have changed! some of them:
- after a index selection is made, the index is splitted into its vertical components
- from differrent index selctions the splitted components can be accumulated before they are placed into the transmission queue
- each splitted chunk gets its own transmission thread
- multiple transmission threads are started concurrently
- the process can be monitored with the blocking queue servlet
To implement that, a new package de.anomic.yacy.dht was created. Some old files have been removed.
The new index distribution model using a vertical DHT was implemented. An abstraction of this model
is implemented in the new dht package as interface. The freeworld network has now a configuration
of two vertial partitions; sixteen partitions are planned and will be configured if the process is bug-free.
This modification has three main targets:
- enhance the DHT transmission speed
- with a vertical DHT, a search will speed up. With two partitions, two times. With sixteen, sixteen times.
- the vertical DHT will apply a semi-dht for URLs, and peers will receive a fraction of the overall URLs they received before.
with two partitions, the fractions will be halve. With sixteen partitions, a 1/16 of the previous number of URLs.
BE CAREFULL, THIS IS A MAJOR CODE CHANGE, POSSIBLY FULL OF BUGS AND HARMFUL THINGS.
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@5586 6c8d7289-2bf4-0310-a012-ef5d649a1542
2009-02-10 01:06:59 +01:00
|
|
|
assert ram == true;
|
2009-03-02 00:58:14 +01:00
|
|
|
return heap.referenceIterator(startWordHash, rot, ram);
|
2006-01-14 00:59:04 +01:00
|
|
|
}
|
|
|
|
|
2007-02-28 12:13:23 +01:00
|
|
|
public synchronized String maxScoreWordHash() {
|
2008-09-23 14:11:19 +02:00
|
|
|
if (heap == null || heap.size() == 0) return null;
|
2007-02-28 12:13:23 +01:00
|
|
|
try {
|
2008-06-06 18:01:27 +02:00
|
|
|
return hashScore.getMaxObject();
|
2008-08-02 14:12:04 +02:00
|
|
|
} catch (final Exception e) {
|
2007-02-28 12:13:23 +01:00
|
|
|
log.logSevere("flushFromMem: " + e.getMessage(), e);
|
|
|
|
}
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
|
2009-01-06 14:51:59 +01:00
|
|
|
    /**
     * Select the word hash whose container should be flushed from the cache
     * next. Two heuristics are combined:
     * - the entry with the maximum reference count (flushed when it exceeds
     *   cacheReferenceCountLimit)
     * - the oldest entry in the cache (flushed when older than
     *   cacheReferenceAgeLimit)
     * If neither limit is reached, the choice depends on the memory
     * situation: under memory pressure the high-score entry is selected
     * (saves the most RAM), otherwise the oldest entry is selected.
     *
     * @return the selected word hash, or null when the cache is empty,
     *         already closed, or selection failed
     */
    public String bestFlushWordHash() {
        // select appropriate hash
        // we have 2 different methods to find a good hash:
        // - the oldest entry in the cache
        // - the entry with maximum count
        if (heap == null || heap.size() == 0) return null;
        try {
            //return hashScore.getMaxObject();
            String hash = null;
            final int count = hashScore.getMaxScore();
            if ((count >= cacheReferenceCountLimit) &&
                ((hash = hashScore.getMaxObject()) != null)) {
                // we MUST flush high-score entries, because a loop deletes entries in cache until this condition fails
                // in this cache we MUST NOT check wCacheMinAge
                return hash;
            }
            final long oldestTime = longEmit(hashDate.getMinScore());
            if (((System.currentTimeMillis() - oldestTime) > cacheReferenceAgeLimit) &&
                ((hash = hashDate.getMinObject()) != null)) {
                // flush out-dated entries
                return hash;
            }
            // cases with respect to memory situation
            if (MemoryControl.free() < 100000) {
                // urgent low-memory case
                hash = hashScore.getMaxObject(); // flush high-score entries (saves RAM)
            } else {
                // not-efficient-so-far case. cleans up unnecessary cache slots
                hash = hashDate.getMinObject(); // flush oldest entries
            }
            if (hash == null) {
                // score tables gave no answer; fall back to the first container in the heap
                final ReferenceContainer ic = heap.referenceIterator(null, false, true).next();
                if (ic != null) hash = ic.getWordHash();
            }
            return hash;
        } catch (final Exception e) {
            log.logSevere("flushFromMem: " + e.getMessage(), e);
        }
        return null;
    }
|
2005-11-04 14:41:51 +01:00
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public synchronized ArrayList<ReferenceContainer> bestFlushContainers(final int count) {
|
|
|
|
final ArrayList<ReferenceContainer> containerList = new ArrayList<ReferenceContainer>();
|
2008-06-18 01:56:39 +02:00
|
|
|
String hash;
|
2009-03-02 00:58:14 +01:00
|
|
|
ReferenceContainer container;
|
2008-06-18 01:56:39 +02:00
|
|
|
for (int i = 0; i < count; i++) {
|
|
|
|
hash = bestFlushWordHash();
|
|
|
|
if (hash == null) return containerList;
|
2009-03-02 00:58:14 +01:00
|
|
|
container = heap.deleteAllReferences(hash);
|
2008-06-18 01:56:39 +02:00
|
|
|
assert (container != null);
|
|
|
|
if (container == null) return containerList;
|
|
|
|
hashScore.deleteScore(hash);
|
|
|
|
hashDate.deleteScore(hash);
|
|
|
|
containerList.add(container);
|
|
|
|
}
|
|
|
|
return containerList;
|
|
|
|
}
|
|
|
|
|
2008-08-02 14:12:04 +02:00
|
|
|
private int intTime(final long longTime) {
|
2006-08-16 21:49:31 +02:00
|
|
|
return (int) Math.max(0, ((longTime - initTime) / 1000));
|
2005-05-31 19:39:14 +02:00
|
|
|
}
|
2005-11-04 14:41:51 +01:00
|
|
|
|
2008-08-02 14:12:04 +02:00
|
|
|
private long longEmit(final int intTime) {
|
2006-08-16 21:49:31 +02:00
|
|
|
return (((long) intTime) * (long) 1000) + initTime;
|
2006-02-25 22:05:19 +01:00
|
|
|
}
|
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public boolean hasReferences(final String wordHash) {
|
|
|
|
return heap.hasReferences(wordHash);
|
2007-01-08 14:13:30 +01:00
|
|
|
}
|
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public int countReferences(String key) {
|
|
|
|
return this.heap.countReferences(key);
|
2007-08-25 01:12:59 +02:00
|
|
|
}
|
2009-03-02 00:58:14 +01:00
|
|
|
|
|
|
|
public synchronized ReferenceContainer getReferences(final String wordHash, final Set<String> urlselection) {
|
2007-10-03 17:34:16 +02:00
|
|
|
if (wordHash == null) return null;
|
|
|
|
|
2006-09-14 02:51:02 +02:00
|
|
|
// retrieve container
|
2009-03-02 00:58:14 +01:00
|
|
|
ReferenceContainer container = heap.getReferences(wordHash, null);
|
2006-09-14 02:51:02 +02:00
|
|
|
|
|
|
|
// We must not use the container from cache to store everything we find,
|
|
|
|
// as that container remains linked to in the cache and might be changed later
|
|
|
|
// while the returned container is still in use.
|
|
|
|
// create a clone from the container
|
|
|
|
if (container != null) container = container.topLevelClone();
|
|
|
|
|
|
|
|
// select the urlselection
|
|
|
|
if ((urlselection != null) && (container != null)) container.select(urlselection);
|
|
|
|
|
|
|
|
return container;
|
2005-05-17 10:25:04 +02:00
|
|
|
}
|
2005-11-04 14:41:51 +01:00
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public synchronized ReferenceContainer deleteAllReferences(final String wordHash) {
|
2006-02-14 01:12:07 +01:00
|
|
|
// returns the index that had been deleted
|
2009-02-10 02:08:06 +01:00
|
|
|
if (wordHash == null || heap == null) return null;
|
2009-03-02 00:58:14 +01:00
|
|
|
final ReferenceContainer container = heap.deleteAllReferences(wordHash);
|
2006-10-28 02:22:10 +02:00
|
|
|
hashScore.deleteScore(wordHash);
|
|
|
|
hashDate.deleteScore(wordHash);
|
|
|
|
return container;
|
2005-05-17 10:25:04 +02:00
|
|
|
}
|
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public synchronized boolean removeReference(final String wordHash, final String urlHash) {
|
|
|
|
final boolean removed = heap.removeReference(wordHash, urlHash);
|
2008-04-06 22:31:16 +02:00
|
|
|
if (removed) {
|
2009-03-02 00:58:14 +01:00
|
|
|
if (heap.hasReferences(wordHash)) {
|
2006-10-28 02:22:10 +02:00
|
|
|
hashScore.decScore(wordHash);
|
|
|
|
hashDate.setScore(wordHash, intTime(System.currentTimeMillis()));
|
2008-04-06 22:31:16 +02:00
|
|
|
} else {
|
|
|
|
hashScore.deleteScore(wordHash);
|
|
|
|
hashDate.deleteScore(wordHash);
|
2006-08-01 12:30:55 +02:00
|
|
|
}
|
2006-10-28 02:22:10 +02:00
|
|
|
return true;
|
2006-08-01 12:30:55 +02:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public synchronized int removeReferences(final String wordHash, final Set<String> urlHashes) {
|
2006-08-01 12:30:55 +02:00
|
|
|
if (urlHashes.size() == 0) return 0;
|
2009-03-02 00:58:14 +01:00
|
|
|
final int c = heap.removeReferences(wordHash, urlHashes);
|
2008-04-06 22:31:16 +02:00
|
|
|
if (c > 0) {
|
2006-10-28 02:22:10 +02:00
|
|
|
// removal successful
|
2009-03-02 00:58:14 +01:00
|
|
|
if (heap.hasReferences(wordHash)) {
|
2008-06-19 20:27:48 +02:00
|
|
|
hashScore.addScore(wordHash, -c);
|
2006-10-28 02:22:10 +02:00
|
|
|
hashDate.setScore(wordHash, intTime(System.currentTimeMillis()));
|
2008-06-19 20:27:48 +02:00
|
|
|
} else {
|
|
|
|
hashScore.deleteScore(wordHash);
|
|
|
|
hashDate.deleteScore(wordHash);
|
2006-02-14 01:12:07 +01:00
|
|
|
}
|
2008-04-06 22:31:16 +02:00
|
|
|
return c;
|
2006-02-14 01:12:07 +01:00
|
|
|
}
|
2006-10-28 02:22:10 +02:00
|
|
|
return 0;
|
2005-05-17 10:25:04 +02:00
|
|
|
}
|
2006-03-13 11:43:12 +01:00
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
public synchronized void addReferences(final ReferenceContainer container) {
|
2005-07-20 02:39:06 +02:00
|
|
|
// this puts the entries into the cache, not into the assortment directly
|
replaced old DHT transmission method with new method. Many things have changed! some of them:
- after a index selection is made, the index is splitted into its vertical components
- from differrent index selctions the splitted components can be accumulated before they are placed into the transmission queue
- each splitted chunk gets its own transmission thread
- multiple transmission threads are started concurrently
- the process can be monitored with the blocking queue servlet
To implement that, a new package de.anomic.yacy.dht was created. Some old files have been removed.
The new index distribution model using a vertical DHT was implemented. An abstraction of this model
is implemented in the new dht package as interface. The freeworld network has now a configuration
of two vertial partitions; sixteen partitions are planned and will be configured if the process is bug-free.
This modification has three main targets:
- enhance the DHT transmission speed
- with a vertical DHT, a search will speed up. With two partitions, two times. With sixteen, sixteen times.
- the vertical DHT will apply a semi-dht for URLs, and peers will receive a fraction of the overall URLs they received before.
with two partitions, the fractions will be halve. With sixteen partitions, a 1/16 of the previous number of URLs.
BE CAREFULL, THIS IS A MAJOR CODE CHANGE, POSSIBLY FULL OF BUGS AND HARMFUL THINGS.
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@5586 6c8d7289-2bf4-0310-a012-ef5d649a1542
2009-02-10 01:06:59 +01:00
|
|
|
if ((container == null) || (container.size() == 0) || heap == null) return;
|
2005-11-04 14:41:51 +01:00
|
|
|
|
2005-10-09 06:43:07 +02:00
|
|
|
// put new words into cache
|
2009-03-02 00:58:14 +01:00
|
|
|
heap.addReferences(container);
|
|
|
|
hashScore.setScore(container.getWordHash(), heap.countReferences(container.getWordHash()));
|
replaced old DHT transmission method with new method. Many things have changed! some of them:
- after a index selection is made, the index is splitted into its vertical components
- from differrent index selctions the splitted components can be accumulated before they are placed into the transmission queue
- each splitted chunk gets its own transmission thread
- multiple transmission threads are started concurrently
- the process can be monitored with the blocking queue servlet
To implement that, a new package de.anomic.yacy.dht was created. Some old files have been removed.
The new index distribution model using a vertical DHT was implemented. An abstraction of this model
is implemented in the new dht package as interface. The freeworld network has now a configuration
of two vertial partitions; sixteen partitions are planned and will be configured if the process is bug-free.
This modification has three main targets:
- enhance the DHT transmission speed
- with a vertical DHT, a search will speed up. With two partitions, two times. With sixteen, sixteen times.
- the vertical DHT will apply a semi-dht for URLs, and peers will receive a fraction of the overall URLs they received before.
with two partitions, the fractions will be halve. With sixteen partitions, a 1/16 of the previous number of URLs.
BE CAREFULL, THIS IS A MAJOR CODE CHANGE, POSSIBLY FULL OF BUGS AND HARMFUL THINGS.
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@5586 6c8d7289-2bf4-0310-a012-ef5d649a1542
2009-02-10 01:06:59 +01:00
|
|
|
hashDate.setScore(container.getWordHash(), intTime(System.currentTimeMillis()));
|
2005-05-17 10:25:04 +02:00
|
|
|
}
|
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
    /**
     * Add a single reference entry for a word hash to the cache and update
     * the score tables.
     *
     * @param wordHash the word hash the entry belongs to
     * @param newEntry the reference row to add
     * @param updateTime time used to refresh the age score of the word hash
     * @param dhtCase not used in this implementation; kept for interface compatibility
     */
    public synchronized void addEntry(final String wordHash, final ReferenceRow newEntry, final long updateTime, final boolean dhtCase) {
        if (heap == null) return; // there was already a shutdown
        heap.addEntry(wordHash, newEntry);
        hashScore.incScore(wordHash);
        hashDate.setScore(wordHash, intTime(updateTime));
    }
|
|
|
|
|
2006-12-05 03:47:51 +01:00
|
|
|
public synchronized void close() {
|
2006-01-23 14:45:14 +01:00
|
|
|
// dump cache
|
2005-05-17 10:25:04 +02:00
|
|
|
try {
|
2009-01-01 23:31:16 +01:00
|
|
|
//heap.dumpold(this.oldDumpFile);
|
2009-01-22 01:03:54 +01:00
|
|
|
heap.dump(this.dumpFile);
|
2008-08-02 14:12:04 +02:00
|
|
|
} catch (final IOException e){
|
2005-08-30 23:32:59 +02:00
|
|
|
log.logSevere("unable to dump cache: " + e.getMessage(), e);
|
2005-05-17 10:25:04 +02:00
|
|
|
}
|
2008-05-24 14:30:50 +02:00
|
|
|
heap = null;
|
|
|
|
hashScore.clear();
|
|
|
|
hashDate.clear();
|
2005-07-20 02:39:06 +02:00
|
|
|
}
|
2009-01-21 19:23:37 +01:00
|
|
|
|
2009-03-02 00:58:14 +01:00
|
|
|
    /**
     * Iterable implementation: iterate over cloned reference containers,
     * starting at the first word hash, without wrap-around.
     */
    public Iterator<ReferenceContainer> iterator() {
        return referenceIterator(null, false, true);
    }
|
2006-01-14 00:59:04 +01:00
|
|
|
}
|