yacy_search_server/source/net/yacy/kelondro/table/Table.java
orbiter bea3b99aff moved table and util classes
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@6397 6c8d7289-2bf4-0310-a012-ef5d649a1542
2009-10-10 01:14:19 +00:00

856 lines
37 KiB
Java

// Table.java
// (C) 2008 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 14.01.2008 on http://yacy.net
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package net.yacy.kelondro.table;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.ConcurrentModificationException;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.TreeSet;
import net.yacy.kelondro.index.Column;
import net.yacy.kelondro.index.HandleMap;
import net.yacy.kelondro.index.ObjectIndex;
import net.yacy.kelondro.index.Row;
import net.yacy.kelondro.index.RowCollection;
import net.yacy.kelondro.index.RowSet;
import net.yacy.kelondro.index.Row.Entry;
import net.yacy.kelondro.io.BufferedRecords;
import net.yacy.kelondro.io.Records;
import net.yacy.kelondro.logging.Log;
import net.yacy.kelondro.order.CloneableIterator;
import net.yacy.kelondro.order.NaturalOrder;
import net.yacy.kelondro.util.FileUtils;
import net.yacy.kelondro.util.MemoryControl;
import net.yacy.kelondro.util.kelondroException;
/*
* The EcoIndex builds upon the EcoFS and tries to reduce the number of IO requests that the
* EcoFS must do to a minimum. In best cases, no IO has to be done for read operations (complete database shadow in RAM)
* and a rare number of write IO operations must be done for a large number of table-writings (using the write buffer of EcoFS)
* To make the EcoIndex scalable in question of available RAM, there are two elements that must be scalable:
* - the access index can be either completely in RAM (kelondroRAMIndex) or it is file-based (kelondroTree)
* - the content cache can be either a complete RAM-based shadow of the File, or empty.
* The content cache can also be deleted during run-time, if the available RAM gets too low.
*/
public class Table implements ObjectIndex {
// static tracker objects
// registry of all open Table instances, keyed by the table file path; used for the static memoryStats()/filenames() accessors
private static TreeMap<String, Table> tableTracker = new TreeMap<String, Table>();
public static final long maxarraylength = 134217727L; // that may be the maximum size of array length in some JVMs
private static final long minmemremaining = 20 * 1024 * 1024; // if less than this memory is remaining, the memory copy of a table is abandoned
//private int fail;
private final int buffersize; // write-buffer size handed to BufferedRecords
protected HandleMap index;    // maps primary key -> record position in the file
protected BufferedRecords file; // the backing record file with write buffer
protected Row rowdef;         // full row layout; column 0 is the primary key
protected File tablefile;     // path of the backing file
protected RowSet table;       // optional full RAM copy of the row tails; null when the tail cache is disabled or abandoned
protected Row taildef;        // row layout of the tail (rowdef without the key column)
/**
 * Opens (or creates) a file-backed table. A RAM index of the primary keys is always
 * built; additionally a full RAM copy of the row tails (all columns except the key)
 * is kept when useTailCache is set and enough memory is available.
 * Badly formed entries and duplicate keys found in an existing file are removed.
 * @param tablefile the backing record file; created empty if it does not exist
 * @param rowdef the row layout; column 0 is the primary key
 * @param buffersize buffer size for the underlying BufferedRecords file
 * @param initialSpace minimum number of records to pre-allocate index/table space for
 * @param useTailCache whether to attempt a complete RAM copy of the row tails
 * @param exceed134217727 allow the RAM copy even beyond the conservative JVM array-size limit
 */
public Table(
final File tablefile,
final Row rowdef,
final int buffersize,
final int initialSpace,
final boolean useTailCache,
final boolean exceed134217727) {
this.tablefile = tablefile;
this.rowdef = rowdef;
this.buffersize = buffersize;
//this.fail = 0;
// define the taildef, a row like the rowdef but without the first column
final Column[] cols = new Column[rowdef.columns() - 1];
for (int i = 0; i < cols.length; i++) {
cols[i] = rowdef.column(i + 1);
}
this.taildef = new Row(cols, NaturalOrder.naturalOrder);
// initialize table file
boolean freshFile = false;
if (!tablefile.exists()) {
// make new file
freshFile = true;
FileOutputStream fos = null;
try {
fos = new FileOutputStream(tablefile);
} catch (final FileNotFoundException e) {
// should not happen
Log.logSevere("Table", "", e);
}
if (fos != null) try { fos.close(); } catch (final IOException e) {}
}
try {
// open an existing table file
final int fileSize = (int) tableSize(tablefile, rowdef.objectsize);
// initialize index and copy table
final int records = Math.max(fileSize, initialSpace);
final long neededRAM4table = (records) * ((rowdef.objectsize) + 4L) * 3L;
// enable the RAM copy only if it fits the array-size heuristic and 200MB headroom remains
table = ((exceed134217727 || neededRAM4table < maxarraylength) &&
(useTailCache && MemoryControl.available() > neededRAM4table + 200 * 1024 * 1024)) ?
new RowSet(taildef, records) : null;
Log.logInfo("TABLE", "initialization of " + tablefile.getName() + ". table copy: " + ((table == null) ? "no" : "yes") + ", available RAM: " + (MemoryControl.available() / 1024 / 1024) + "MB, needed: " + (neededRAM4table/1024/1024 + 200) + "MB, allocating space for " + records + " entries");
// NOTE(review): the product below is evaluated in int arithmetic before widening and
// may overflow for very large record counts — verify against expected table sizes
final long neededRAM4index = 2 * 1024 * 1024 + records * (rowdef.primaryKeyLength + 4) * 3 / 2;
if (!MemoryControl.request(neededRAM4index, false)) {
// despite calculations seemed to show that there is enough memory for the table AND the index
// there is now not enough memory left for the index. So delete the table again to free the memory
// for the index
Log.logSevere("TABLE", tablefile.getName() + ": not enough RAM (" + (MemoryControl.available() / 1024 / 1024) + "MB) left for index, deleting allocated table space to enable index space allocation (needed: " + (neededRAM4index / 1024 / 1024) + "MB)");
table = null; System.gc();
Log.logSevere("TABLE", tablefile.getName() + ": RAM after releasing the table: " + (MemoryControl.available() / 1024 / 1024) + "MB");
}
index = new HandleMap(rowdef.primaryKeyLength, rowdef.objectOrder, 4, records, 100000);
// collects positions of keys that are not well-formed; these entries are removed from the file below
HandleMap errors = new HandleMap(rowdef.primaryKeyLength, NaturalOrder.naturalOrder, 4, records, 10);
Log.logInfo("TABLE", tablefile + ": TABLE " + tablefile.toString() + " has table copy " + ((table == null) ? "DISABLED" : "ENABLED"));
// read all elements from the file into the copy table
Log.logInfo("TABLE", "initializing RAM index for TABLE " + tablefile.getName() + ", please wait.");
int i = 0;
byte[] key;
if (table == null) {
// no RAM copy: scan only the key prefix of each record
final Iterator<byte[]> ki = new ChunkIterator(tablefile, rowdef.objectsize, rowdef.primaryKeyLength);
while (ki.hasNext()) {
key = ki.next();
// write the key into the index table
assert key != null;
if (key == null) {i++; continue;}
if (rowdef.objectOrder.wellformed(key)) {
index.putUnique(key, i++);
} else {
errors.putUnique(key, i++);
}
}
} else {
// RAM copy enabled: scan full records, index the key and store the tail
byte[] record;
key = new byte[rowdef.primaryKeyLength];
final Iterator<byte[]> ri = new ChunkIterator(tablefile, rowdef.objectsize, rowdef.objectsize);
while (ri.hasNext()) {
record = ri.next();
assert record != null;
if (record == null) {i++; continue;}
System.arraycopy(record, 0, key, 0, rowdef.primaryKeyLength);
// write the key into the index table
if (rowdef.objectOrder.wellformed(key)) {
index.putUnique(key, i++);
// write the tail into the table
table.addUnique(taildef.newEntry(record, rowdef.primaryKeyLength, true));
if (abandonTable()) {
// memory got too low while copying; fall back to file-only mode
table = null;
break;
}
} else {
errors.putUnique(key, i++);
}
}
}
// open the file
this.file = new BufferedRecords(new Records(tablefile, rowdef.objectsize), this.buffersize);
// clean up the file by cleaning badly formed entries
int errorc = errors.size();
int errorcc = 0;
int idx;
for (Entry entry: errors) {
key = entry.getPrimaryKeyBytes();
idx = (int) entry.getColLong(1);
Log.logWarning("Table", "removing not well-formed entry " + idx + " with key: " + NaturalOrder.arrayList(key, 0, key.length) + ", " + errorcc++ + "/" + errorc);
removeInFile(idx);
}
assert index.size() == this.file.size() : "index.size() = " + index.size() + ", this.file.size() = " + this.file.size();
// remove doubles
if (!freshFile) {
final ArrayList<Long[]> doubles = index.removeDoubles();
//assert index.size() + doubles.size() + fail == i;
//System.out.println(" -removed " + doubles.size() + " doubles- done.");
if (doubles.size() > 0) {
Log.logInfo("TABLE", tablefile + ": WARNING - TABLE " + tablefile + " has " + doubles.size() + " doubles");
// from all the doubles take one, put it back to the index and remove the others from the file
// first put back one element each
final byte[] record = new byte[rowdef.objectsize];
key = new byte[rowdef.primaryKeyLength];
for (final Long[] ds: doubles) {
file.get(ds[0].intValue(), record, 0);
System.arraycopy(record, 0, key, 0, rowdef.primaryKeyLength);
index.putUnique(key, ds[0].intValue());
}
// then remove the other doubles by removing them from the table, but do a re-indexing while doing that
// first aggregate all the delete positions because the elements from the top positions must be removed first
final TreeSet<Long> delpos = new TreeSet<Long>();
for (final Long[] ds: doubles) {
for (int j = 1; j < ds.length; j++) delpos.add(ds[j]);
}
// now remove the entries in a sorted way (top-down)
Long top;
while (delpos.size() > 0) {
top = delpos.last();
delpos.remove(top);
removeInFile(top.intValue());
}
}
}
} catch (final FileNotFoundException e) {
// should never happen
Log.logSevere("Table", "", e);
throw new kelondroException(e.getMessage());
} catch (final IOException e) {
Log.logSevere("Table", "", e);
throw new kelondroException(e.getMessage());
}
// track this table
tableTracker.put(tablefile.toString(), this);
}
/**
 * Decides whether the in-RAM copy of the table must be dropped because free
 * memory has fallen below the safety threshold.
 * @return true if the RAM copy should be abandoned
 */
private boolean abandonTable() {
    final long remaining = MemoryControl.available();
    return remaining < minmemremaining;
}
/**
 * Computes the number of records stored in a table file.
 * @param tablefile the table file
 * @param recordsize the byte size of one record
 * @return the number of records in the file
 * @throws IOException
 */
public static long tableSize(final File tablefile, final int recordsize) throws IOException {
// returns number of records in table
return Records.tableSize(tablefile, recordsize);
}
/**
 * Iterates the file names of all tables registered in the static tracker.
 * @return an iterator over the tracked table file names
 */
public static final Iterator<String> filenames() {
    final Iterator<String> names = tableTracker.keySet().iterator();
    return names;
}
/**
 * Returns memory statistics for the table registered under the given file name.
 * The map contains properties of the record objects, i.e. for cache memory allocation.
 * @param filename the table file name as registered in the tracker
 * @return the statistics map, or null if no table is tracked under that name
 *         (the original threw a NullPointerException in that case)
 */
public static final Map<String, String> memoryStats(final String filename) {
    final Table theTABLE = tableTracker.get(filename);
    // guard against unknown names; a table may have been closed/never opened
    if (theTABLE == null) return null;
    return theTABLE.memoryStats();
}
/**
 * Collects statistical data about this table: entry count and estimated memory
 * consumption of the key index and (if present) the RAM tail copy.
 * The "*Mem" values include the growth factor of RowCollection.
 * @return a map of property name to value (all values as strings)
 */
private final Map<String, String> memoryStats() {
// returns statistical data about this object
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
final HashMap<String, String> map = new HashMap<String, String>();
map.put("tableSize", Integer.toString(index.size()));
map.put("tableKeyChunkSize", Integer.toString(index.row().objectsize));
map.put("tableKeyMem", Integer.toString((int) (((long) index.row().objectsize) * ((long) index.size()) * RowCollection.growfactor100 / 100L)));
map.put("tableValueChunkSize", (table == null) ? "0" : Integer.toString(table.row().objectsize));
map.put("tableValueMem", (table == null) ? "0" : Integer.toString((int) (((long) table.row().objectsize) * ((long) table.size()) * RowCollection.growfactor100 / 100L)));
return map;
}
/**
 * Tells whether this table currently keeps a complete RAM copy of the row tails.
 * @return true if the RAM copy is active
 */
public boolean usesFullCopy() {
    return null != this.table;
}
/**
 * Estimates the RAM needed for the key index of the given table file,
 * including the RowCollection growth factor.
 * @param f the table file
 * @param rowdef the row definition (primary key length and object size are used)
 * @return estimated index memory need in bytes
 * @throws IOException
 */
public static int staticRAMIndexNeed(final File f, final Row rowdef) throws IOException {
return (int) (((long)(rowdef.primaryKeyLength + 4)) * tableSize(f, rowdef.objectsize) * RowCollection.growfactor100 / 100L);
}
/**
 * Appends a row whose key is known not to exist in the table yet (not checked here).
 * The row is appended at the end of the file and its file position is recorded in the index;
 * the tail is mirrored into the RAM copy if one is active.
 * @param row the row to append
 * @throws IOException
 */
public synchronized void addUnique(final Entry row) throws IOException {
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
// the new row gets the next free file position
final int i = (int) file.size();
index.putUnique(row.getPrimaryKeyBytes(), i);
if (table != null) {
assert table.size() == i;
table.addUnique(taildef.newEntry(row.bytes(), rowdef.primaryKeyLength, true));
// drop the RAM copy when memory runs low
if (abandonTable()) table = null;
}
file.add(row.bytes(), 0);
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
}
/**
 * Appends a list of rows; each entry is added through the single-row addUnique.
 * @param rows the rows to append; keys are assumed to be new (not checked here)
 * @throws IOException
 */
public synchronized void addUnique(final List<Entry> rows) throws IOException {
    assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
    for (final Entry entry : rows) {
        addUnique(entry);
    }
    assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
}
/**
 * remove double-entries from the table
 * this process calls the underlying removeDoubles() method from the table index
 * and removes the duplicate rows from the backing file afterwards (largest file
 * positions first, because lower positions are re-filled from the file end)
 * @return one RowCollection per group of rows that shared the same key
 * @throws IOException
 */
public synchronized ArrayList<RowCollection> removeDoubles() throws IOException {
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
final ArrayList<RowCollection> report = new ArrayList<RowCollection>();
RowSet rows;
final TreeSet<Long> d = new TreeSet<Long>();
final byte[] b = new byte[rowdef.objectsize];
Long L;
Row.Entry inconsistentEntry;
// iterate over all entries that have inconsistent index references
long lastlog = System.currentTimeMillis();
for (final Long[] is: index.removeDoubles()) {
// 'is' is the set of all indexes, that have the same reference
// we collect that entries now here
rows = new RowSet(this.rowdef, is.length);
for (int j = 0; j < is.length; j++) {
L = is[j];
assert L.intValue() < file.size() : "L.intValue() = " + L.intValue() + ", file.size = " + file.size(); // prevent ooBounds Exception
d.add(L);
if (L.intValue() >= file.size()) continue; // prevent IndexOutOfBoundsException
file.get(L.intValue(), b, 0); // TODO: fix IndexOutOfBoundsException here
inconsistentEntry = rowdef.newEntry(b);
rows.addUnique(inconsistentEntry);
}
report.add(rows);
}
// finally delete the affected rows, but start with largest id first, otherwise we overwrite wrong entries
Long s;
while (d.size() > 0) {
s = d.last();
d.remove(s);
this.removeInFile(s.intValue());
// log progress at most every 30 seconds for very large cleanups
if (System.currentTimeMillis() - lastlog > 30000) {
Log.logInfo("TABLE", "removing " + d.size() + " entries in " + this.filename());
lastlog = System.currentTimeMillis();
}
}
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
return report;
}
/**
 * Closes the backing file and releases all in-memory structures.
 * After this call the table must not be used any more.
 */
public void close() {
    this.file.close();
    this.index = null;
    this.table = null;
    this.file = null;
}
/**
 * Finalizer safety net: closes the backing file if close() was never called.
 * NOTE(review): finalizers run unpredictably (or never); callers should always
 * invoke close() explicitly.
 */
protected void finalize() {
if (this.file != null) this.close();
}
/**
 * Returns the path of the backing table file as a string.
 * @return the file name of this table
 */
public String filename() {
    final File backing = this.file.filename();
    return backing.toString();
}
/**
 * Reads the row with the given primary key.
 * If a RAM copy is active the row is composed from the key and the cached tail;
 * otherwise it is read from the file.
 * @param key the primary key; must have rowdef.primaryKeyLength bytes
 * @return the row, or null if the key is not present or the table is closed
 * @throws IOException
 */
public synchronized Entry get(final byte[] key) throws IOException {
if ((file == null) || (index == null)) return null;
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
final int i = (int) index.get(key);
if (i == -1) return null;
final byte[] b = new byte[rowdef.objectsize];
if (table == null) {
// read row from the file
file.get(i, b, 0);
} else {
// construct the row using the copy in RAM
final Row.Entry v = table.get(i, false);
assert v != null;
if (v == null) return null;
assert key.length == rowdef.primaryKeyLength;
System.arraycopy(key, 0, b, 0, key.length);
System.arraycopy(v.bytes(), 0, b, rowdef.primaryKeyLength, rowdef.objectsize - rowdef.primaryKeyLength);
}
return rowdef.newEntry(b);
}
/**
 * Tests whether a row with the given primary key exists.
 * Only the RAM index is consulted; no file access is needed.
 * @param key the primary key to test
 * @return true if the key is present
 */
public synchronized boolean has(final byte[] key) {
try {
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
} catch (final IOException e) {
// file.size() may throw; this only guards the consistency assertion
Log.logSevere("Table", "", e);
}
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
return index.has(key);
}
/**
 * Iterates the primary keys in index order.
 * @param up ascending if true, descending otherwise
 * @param firstKey the key to start at, or null to start at the first/last key
 * @throws IOException
 */
public synchronized CloneableIterator<byte[]> keys(final boolean up, final byte[] firstKey) throws IOException {
return index.keys(up, firstKey);
}
/**
 * Stores a row, replacing an existing row with the same key.
 * If the key is new the row is appended (addUnique) and null is returned.
 * @param row the row to store
 * @return the previous row with the same key, or null if there was none
 * @throws IOException
 */
public synchronized Entry replace(final Entry row) throws IOException {
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
assert row != null;
assert row.bytes() != null;
if ((row == null) || (row.bytes() == null)) return null;
final int i = (int) index.get(row.getPrimaryKeyBytes());
if (i == -1) {
// key not present yet: plain append, no old value
addUnique(row);
return null;
}
final byte[] b = new byte[rowdef.objectsize];
if (table == null) {
// read old value
file.get(i, b, 0);
// write new value
file.put(i, row.bytes(), 0);
} else {
// read old value
final Row.Entry v = table.get(i, false);
assert v != null;
// compose the old row from the key and the cached tail
System.arraycopy(row.getPrimaryKeyBytes(), 0, b, 0, rowdef.primaryKeyLength);
System.arraycopy(v.bytes(), 0, b, rowdef.primaryKeyLength, rowdef.objectsize - rowdef.primaryKeyLength);
// write new value
table.set(i, taildef.newEntry(row.bytes(), rowdef.primaryKeyLength, true));
file.put(i, row.bytes(), 0);
}
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
// return old value
return rowdef.newEntry(b);
}
/**
 * Stores a row, overwriting an existing row with the same key.
 * Unlike replace(), the old value is not read back and not returned.
 * @param row the row to store; ignored if null or the table is closed
 * @throws IOException
 */
public synchronized void put(final Entry row) throws IOException {
assert file == null || file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
assert row != null;
assert row.bytes() != null;
if (file == null || row == null || row.bytes() == null) return;
final int i = (int) index.get(row.getPrimaryKeyBytes());
if (i == -1) {
// key not present yet: plain append
addUnique(row);
return;
}
if (table == null) {
// write new value
file.put(i, row.bytes(), 0);
} else {
// write new value
table.set(i, taildef.newEntry(row.bytes(), rowdef.primaryKeyLength, true));
file.put(i, row.bytes(), 0);
}
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
}
/**
 * Stores a row; the entry date is ignored by this implementation.
 * @param row the row to store
 * @param entryDate ignored
 * @return the previous row with the same key, or null (see replace)
 * @throws IOException
 */
public synchronized Entry put(final Entry row, final Date entryDate) throws IOException {
return replace(row);
}
/**
 * remove one entry from the file
 * The gap is filled by moving the last record of the file into the removed
 * position (swap-with-last), so the file order is not stable; the index entry
 * of the moved record is updated accordingly.
 * @param i an index position within the file (not a byte position)
 * @throws IOException
 */
private void removeInFile(final int i) throws IOException {
assert i >= 0;
final byte[] p = new byte[rowdef.objectsize];
if (table == null) {
if (i == index.size() - 1) {
// removing the last record: just truncate
file.cleanLast();
} else {
// move the last record into the gap and re-index it
file.cleanLast(p, 0);
file.put(i, p, 0);
final byte[] k = new byte[rowdef.primaryKeyLength];
System.arraycopy(p, 0, k, 0, rowdef.primaryKeyLength);
index.put(k, i);
}
} else {
if (i == index.size() - 1) {
// special handling if the entry is the last entry in the file
table.removeRow(i, false);
file.cleanLast();
} else {
// switch values
final Row.Entry te = table.removeOne();
table.set(i, te);
file.cleanLast(p, 0);
file.put(i, p, 0);
final Row.Entry lr = rowdef.newEntry(p);
index.put(lr.getPrimaryKeyBytes(), i);
}
}
}
/**
 * Removes the row with the given key and returns it.
 * The gap in the file is filled by moving the last record into the removed
 * position (swap-with-last), keeping file, index and RAM copy consistent.
 * @param key the primary key; must have rowdef.primaryKeyLength bytes
 * @return the removed row, or null if the key was not present
 * @throws IOException
 */
public synchronized Entry remove(final byte[] key) throws IOException {
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
assert key.length == rowdef.primaryKeyLength;
final int i = (int) index.get(key);
if (i == -1) return null; // nothing to do
// prepare result
final byte[] b = new byte[rowdef.objectsize];
final byte[] p = new byte[rowdef.objectsize];
final int sb = index.size();
int ix;
assert i < index.size();
if (table == null) {
if (i == index.size() - 1) {
// element is at last entry position
ix = (int) index.remove(key);
assert ix == i;
file.cleanLast(b, 0);
} else {
// remove entry from index
assert i < index.size() - 1;
ix = (int) index.remove(key);
assert ix == i;
// read element that shall be removed
file.get(i, b, 0);
// fill the gap with value from last entry in file
file.cleanLast(p, 0);
file.put(i, p, 0);
final byte[] k = new byte[rowdef.primaryKeyLength];
System.arraycopy(p, 0, k, 0, rowdef.primaryKeyLength);
index.put(k, i);
}
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
} else {
// get result value from the table copy, so we don't need to read it from the file
final Row.Entry v = table.get(i, false);
System.arraycopy(key, 0, b, 0, key.length);
System.arraycopy(v.bytes(), 0, b, rowdef.primaryKeyLength, taildef.objectsize);
if (i == index.size() - 1) {
// special handling if the entry is the last entry in the file
ix = (int) index.remove(key);
assert ix == i;
table.removeRow(i, false);
file.cleanLast();
} else {
// remove entry from index
ix = (int) index.remove(key);
assert ix == i;
// switch values:
// remove last entry from the file copy to fill it in the gap
final Row.Entry te = table.removeOne();
// fill the gap in file copy
table.set(i, te);
// move entry from last entry in file to gap position
file.cleanLast(p, 0);
file.put(i, p, 0);
// set new index for moved entry in index
final Row.Entry lr = rowdef.newEntry(p);
index.put(lr.getPrimaryKeyBytes(), i);
}
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
}
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
assert index.size() + 1 == sb : "index.size() = " + index.size() + ", sb = " + sb;
return rowdef.newEntry(b);
}
/**
 * Removes and returns the last row in the file (arbitrary with respect to key order).
 * File, index and RAM copy are kept consistent.
 * @return the removed row
 * @throws IOException
 */
public synchronized Entry removeOne() throws IOException {
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
final byte[] le = new byte[rowdef.objectsize];
long fsb = file.size();
assert fsb != 0 : "file.size() = " + fsb;
// take the last record out of the file
file.cleanLast(le, 0);
assert file.size() < fsb : "file.size() = " + file.size();
final Row.Entry lr = rowdef.newEntry(le);
final int i = (int) index.remove(lr.getPrimaryKeyBytes());
assert i >= 0;
if (table != null) table.removeOne();
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert table == null || table.size() == index.size() : "table.size() = " + table.size() + ", index.size() = " + index.size();
return lr;
}
/**
 * Removes all entries from the table by deleting and re-creating the backing
 * file and resetting the index and (if active) the RAM copy.
 * @throws IOException
 */
public synchronized void clear() throws IOException {
final File f = file.filename();
file.close();
FileUtils.deletedelete(f);
// make new file
FileOutputStream fos = null;
try {
fos = new FileOutputStream(f);
} catch (final FileNotFoundException e) {
// should not happen
Log.logSevere("Table", "", e);
}
if (fos != null) try { fos.close(); } catch (final IOException e) {}
// open an existing table file
try {
this.file = new BufferedRecords(new Records(f, rowdef.objectsize), this.buffersize);
} catch (final FileNotFoundException e) {
// should never happen
Log.logSevere("Table", "", e);
}
// initialize index and copy table
// keep the tail-cache mode (enabled/disabled) that was in effect before clearing
table = (table == null) ? null : new RowSet(taildef, 1);
index = new HandleMap(rowdef.primaryKeyLength, rowdef.objectOrder, 4, 1, 100000);
}
/** @return the row definition (layout) of this table */
public Row row() {
return this.rowdef;
}
/** @return the number of rows, taken from the RAM index */
public synchronized int size() {
return index.size();
}
/**
 * Iterates all rows in file order (not key order) by reading the table file directly.
 * @throws IOException
 */
public synchronized CloneableIterator<Entry> rows() throws IOException {
return new rowIteratorNoOrder();
}
/**
 * Row iterator in file order: streams full records straight from the table file.
 * NOTE(review): this reads the file directly via ChunkIterator; rows still held in
 * the write buffer of BufferedRecords may not be visible — verify before relying on it.
 */
public class rowIteratorNoOrder implements CloneableIterator<Entry> {
final Iterator<byte[]> ri;
public rowIteratorNoOrder() throws IOException {
// full-record chunks: chunk size == record size
ri = new ChunkIterator(tablefile, rowdef.objectsize, rowdef.objectsize);
}
public CloneableIterator<Entry> clone(Object modifier) {
try {
return new rowIteratorNoOrder();
} catch (IOException e) {
Log.logSevere("Table", "", e);
return null;
}
}
public boolean hasNext() {
return ri.hasNext();
}
public Entry next() {
byte[] r = ri.next();
return rowdef.newEntry(r);
}
public void remove() {
throw new UnsupportedOperationException("no remove in row iterator");
}
}
/**
 * Iterates rows in key order, starting at firstKey.
 * @param up ascending if true, descending otherwise
 * @param firstKey the key to start at, or null for the first/last key
 * @throws IOException
 */
public synchronized CloneableIterator<Entry> rows(final boolean up, final byte[] firstKey) throws IOException {
return new rowIterator(up, firstKey);
}
/**
 * Row iterator in key order: walks the index keys and resolves each key to its
 * current file position. Throws ConcurrentModificationException from next() if a
 * key disappears from the index while iterating.
 */
public class rowIterator implements CloneableIterator<Entry> {
Iterator<byte[]> i;   // key iterator from the index
boolean up;           // iteration direction
byte[] fk;            // the start key, kept for clone()
int c;                // file position of the row returned by the last next()
public rowIterator(final boolean up, final byte[] firstKey) {
this.up = up;
this.fk = firstKey;
this.i = index.keys(up, firstKey);
this.c = -1;
}
public CloneableIterator<Entry> clone(final Object modifier) {
return new rowIterator(up, fk);
}
public boolean hasNext() {
return i.hasNext();
}
public Entry next() {
final byte[] k = i.next();
assert k != null;
if (k == null) return null;
this.c = (int) index.get(k);
if (this.c < 0) throw new ConcurrentModificationException(); // this should only happen if the table was modified during the iteration
final byte[] b = new byte[rowdef.objectsize];
if (table == null) {
// read from file
try {
file.get(this.c, b, 0);
} catch (final IOException e) {
Log.logSevere("Table", "", e);
return null;
}
} else {
// compose from table and key
final Row.Entry v = table.get(this.c, false);
assert v != null;
if (v == null) return null;
System.arraycopy(k, 0, b, 0, rowdef.primaryKeyLength);
System.arraycopy(v.bytes(), 0, b, rowdef.primaryKeyLength, taildef.objectsize);
}
return rowdef.newEntry(b);
}
public void remove() {
throw new UnsupportedOperationException("no remove in TABLE");
}
}
/**
 * Builds a 4-byte test key: the given character followed by three space bytes.
 * @param c the character to encode
 * @return the padded key bytes
 */
private static byte[] testWord(final char c) {
    final byte[] word = new byte[4];
    word[0] = (byte) c;
    word[1] = 32;
    word[2] = 32;
    word[3] = 32;
    return word;
}
/**
 * Builds the alphabet string "AB...": one character per letter, then computes
 * all its permutations.
 * @param letters the number of distinct letters, starting at 'A'
 * @return all permutations of the generated string
 */
private static String[] permutations(final int letters) {
    // use StringBuilder instead of repeated String concatenation in the loop
    final StringBuilder p = new StringBuilder(letters);
    for (int i = 0; i < letters; i++) p.append((char) ('A' + i));
    return permutations(p.toString());
}
/**
 * Computes all permutations of the characters of the given string, recursively:
 * the first character is inserted at every position of every permutation of the
 * remaining characters.
 * NOTE(review): returns an empty array (not {""}) for the empty string.
 * @param source the string to permute (characters assumed distinct)
 * @return an array of source.length()! permutations
 */
private static String[] permutations(final String source) {
if (source.length() == 0) return new String[0];
if (source.length() == 1) return new String[]{source};
final char c = source.charAt(0);
final String[] recres = permutations(source.substring(1));
final String[] result = new String[source.length() * recres.length];
for (int perm = 0; perm < recres.length; perm++) {
// insert c at position 0, at each interior position, and at the end
result[perm * source.length()] = c + recres[perm];
for (int pos = 1; pos < source.length() - 1; pos++) {
result[perm * source.length() + pos] = recres[perm].substring(0, pos) + c + recres[perm].substring(pos);
}
result[perm * source.length() + source.length() - 1] = recres[perm] + c;
}
return result;
}
/**
 * Test helper: creates a fresh two-column table file and fills it with one row
 * per character of the given test string (key == value == padded character).
 * @param f the table file; deleted first if it exists
 * @param testentities the characters to insert
 * @param useTailCache passed through to the Table constructor
 * @param exceed134217727 passed through to the Table constructor
 * @return the populated table
 * @throws IOException
 */
private static ObjectIndex testTable(final File f, final String testentities, final boolean useTailCache, final boolean exceed134217727) throws IOException {
if (f.exists()) FileUtils.deletedelete(f);
final Row rowdef = new Row("byte[] a-4, byte[] b-4", NaturalOrder.naturalOrder);
final ObjectIndex tt = new Table(f, rowdef, 100, 0, useTailCache, exceed134217727);
byte[] b;
final Row.Entry row = rowdef.newEntry();
for (int i = 0; i < testentities.length(); i++) {
b = testWord(testentities.charAt(i));
row.setCol(0, b);
row.setCol(1, b);
tt.put(row);
}
return tt;
}
/**
 * Test helper: counts the rows of the given index by iterating them.
 * @param t the index to count
 * @return the number of rows seen (0 if the iteration fails)
 */
private static int countElements(final ObjectIndex t) {
    int count = 0;
    try {
        final Iterator<Row.Entry> iter = t.rows();
        Row.Entry row;
        while (iter.hasNext()) {
            count++;
            row = iter.next();
            if (row == null) System.out.println("ERROR! null element found");
        }
    } catch (final IOException e) {
        // previously swallowed silently; log so a failed iteration is not mistaken for an empty table
        Log.logSevere("Table", "countElements: row iteration failed", e);
    }
    return count;
}
/**
 * Exhaustive self-test: for every permutation of the test alphabet, builds a
 * table, verifies its size, then deletes elements in every other permutation
 * order, re-checking the size after each deletion.
 * @param elements number of distinct letters to permute (test cost grows as (n!)^2)
 * @param testFile the file used for each test table (recreated repeatedly)
 * @param useTailCache passed through to the Table constructor
 * @param exceed134217727 passed through to the Table constructor
 */
public static void bigtest(final int elements, final File testFile, final boolean useTailCache, final boolean exceed134217727) {
System.out.println("starting big test with " + elements + " elements:");
final long start = System.currentTimeMillis();
final String[] s = permutations(elements);
ObjectIndex tt;
try {
for (int i = 0; i < s.length; i++) {
System.out.println("*** probing tree " + i + " for permutation " + s[i]);
// generate tree and delete elements
tt = testTable(testFile, s[i], useTailCache, exceed134217727);
if (countElements(tt) != tt.size()) {
System.out.println("wrong size for " + s[i]);
}
tt.close();
for (int j = 0; j < s.length; j++) {
// rebuild the table with insertion order s[i] for each deletion order s[j]
tt = testTable(testFile, s[i], useTailCache, exceed134217727);
// delete by permutation j
for (int elt = 0; elt < s[j].length(); elt++) {
tt.remove(testWord(s[j].charAt(elt)));
if (countElements(tt) != tt.size()) {
System.out.println("ERROR! wrong size for probe tree " + s[i] + "; probe delete " + s[j] + "; position " + elt);
}
}
tt.close();
}
}
System.out.println("FINISHED test after " + ((System.currentTimeMillis() - start) / 1000) + " seconds.");
} catch (final Exception e) {
e.printStackTrace();
System.out.println("TERMINATED");
}
}
/**
 * Debug helper: prints every row of the table to stdout, in key order.
 * Fixes of the original: the row label printed the Iterator object itself
 * ("row " + i) instead of a row number, and a null row from get() caused a
 * NullPointerException on row.toString().
 * @throws IOException
 */
public void print() throws IOException {
    System.out.println("PRINTOUT of table, length=" + size());
    Entry row;
    byte[] key;
    final CloneableIterator<byte[]> i = keys(true, null);
    int rowNumber = 0;
    while (i.hasNext()) {
        System.out.print("row " + (rowNumber++) + ": ");
        key = i.next();
        row = get(key);
        // get() may return null if the table was modified concurrently
        System.out.println((row == null) ? "NULL" : row.toString());
    }
    System.out.println("EndOfTable");
}
/**
 * Command-line self-test entry point: runs bigtest twice against the file given
 * as args[0], once without and once with the tail cache.
 * @param args args[0] is the path of the test table file
 */
public static void main(final String[] args) {
// open a file, add one entry and exit
final File f = new File(args[0]);
System.out.println("========= Testcase: no tail cache:");
bigtest(5, f, false, false);
System.out.println("========= Testcase: with tail cache:");
bigtest(5, f, true, true);
/*
kelondroRow row = new kelondroRow("byte[] key-4, byte[] x-5", kelondroNaturalOrder.naturalOrder, 0);
try {
kelondroTABLE t = new kelondroTABLE(f, row);
kelondroRow.Entry entry = row.newEntry();
entry.setCol(0, "abcd".getBytes());
entry.setCol(1, "dummy".getBytes());
t.put(entry);
t.close();
} catch (IOException e) {
e.printStackTrace();
}
*/
}
/** Marks the backing table file for deletion when the JVM terminates. */
public void deleteOnExit() {
this.file.deleteOnExit();
}
}