fix for sitemap importer: can now also import very large sitemaps within small memory configurations
Michael Peter Christen 2012-07-08 16:11:50 +02:00
parent 92731e5287
commit fbc1a2030d
2 changed files with 73 additions and 48 deletions
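The memory fix works by replacing the old SitemapReader, which extended ArrayList<URLEntry> and materialized every entry of the sitemap (including all recursively loaded sub-sitemaps) before returning, with a producer thread that parses in the background and hands entries to the consumer through a bounded BlockingQueue, terminated by a shared POISON_URLEntry sentinel. The queue holds at most 10,000 entries at a time, so the full URL list is never built in memory. A minimal, self-contained sketch of that poison-pill producer/consumer pattern (class and entry names here are illustrative, not YaCy's):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class Producer extends Thread {

    public static final class Entry {
        public final String loc;
        public Entry(final String loc) { this.loc = loc; }
    }

    // one shared sentinel instance; consumers detect it by reference identity
    public static final Entry POISON = new Entry(null);

    // the bounded capacity is the memory cap: put() blocks once the
    // consumer falls behind, instead of growing an unbounded list
    private final BlockingQueue<Entry> queue = new ArrayBlockingQueue<Entry>(10000);

    @Override
    public void run() {
        try {
            for (int i = 0; i < 1000000; i++) {
                this.queue.put(new Entry("http://example.com/page" + i));
            }
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            // always terminate the stream, even after an error
            try { this.queue.put(POISON); } catch (final InterruptedException ignored) {}
        }
    }

    public Entry take() {
        try {
            return this.queue.take();
        } catch (final InterruptedException e) {
            return POISON; // treat interruption as end-of-stream, like the commit does
        }
    }

    public static void main(final String[] args) {
        final Producer p = new Producer();
        p.start();
        int n = 0;
        Entry item;
        while ((item = p.take()) != POISON) n++; // identity comparison, as in SitemapImporter
        System.out.println(n + " entries streamed");
    }
}

YaCy's version of this is the new SitemapReader below, with URLEntry as the payload and the same queue capacity of 10,000.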

SitemapImporter.java

@@ -30,6 +30,7 @@ import java.util.Date;
import net.yacy.cora.document.ASCII;
import net.yacy.document.parser.sitemapParser;
import net.yacy.document.parser.sitemapParser.URLEntry;
import net.yacy.kelondro.data.meta.DigestURI;
import net.yacy.kelondro.data.meta.URIMetadataRow;
import net.yacy.kelondro.logging.Log;
@@ -56,7 +57,11 @@ public class SitemapImporter extends Thread {
try {
logger.logInfo("Start parsing sitemap file " + this.siteMapURL);
sitemapParser.SitemapReader parser = sitemapParser.parse(this.siteMapURL);
for (sitemapParser.URLEntry entry: parser) process(entry);
parser.start();
URLEntry item;
while ((item = parser.take()) != sitemapParser.POISON_URLEntry) {
process(item);
}
} catch (final Exception e) {
logger.logWarning("Unable to parse sitemap file " + this.siteMapURL, e);
}

sitemapParser.java

@@ -33,10 +33,11 @@ import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.zip.GZIPInputStream;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import net.yacy.cora.date.ISO8601Formatter;
import net.yacy.cora.document.MultiProtocolURI;
@@ -52,18 +53,17 @@ import net.yacy.document.TextParser;
import net.yacy.document.parser.html.ImageEntry;
import net.yacy.kelondro.data.meta.DigestURI;
import net.yacy.kelondro.io.ByteCountInputStream;
import net.yacy.kelondro.logging.Log;
import org.w3c.dom.CharacterData;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
public class sitemapParser extends AbstractParser implements Parser {
public sitemapParser() {
super("RSS Parser");
super("sitemap Parser");
// unfortunately sitemap files have neither a mime type nor a typical file extension.
//SUPPORTED_EXTENSIONS.add("php");
//SUPPORTED_EXTENSIONS.add("xml");
@@ -73,17 +73,13 @@ public class sitemapParser extends AbstractParser implements Parser {
public Document[] parse(final DigestURI url, final String mimeType,
final String charset, final InputStream source)
throws Failure, InterruptedException {
SitemapReader sitemap;
try {
sitemap = new SitemapReader(source);
} catch (IOException e) {
throw new Parser.Failure("Load error:" + e.getMessage(), url);
}
final List<Document> docs = new ArrayList<Document>();
SitemapReader sitemap = new SitemapReader(source);
sitemap.start();
DigestURI uri;
Document doc;
for (final URLEntry item: sitemap) try {
URLEntry item;
while ((item = sitemap.take()) != POISON_URLEntry) try {
uri = new DigestURI(item.loc);
doc = new Document(
uri,
@@ -115,6 +111,7 @@ public class sitemapParser extends AbstractParser implements Parser {
public static SitemapReader parse(final DigestURI sitemapURL) throws IOException {
// download document
Log.logInfo("SitemapReader", "loading sitemap from " + sitemapURL.toNormalform(true, false));
final RequestHeader requestHeader = new RequestHeader();
requestHeader.put(HeaderFramework.USER_AGENT, ClientIdentification.getUserAgent());
final HTTPClient client = new HTTPClient();
@@ -137,57 +134,79 @@ public class sitemapParser extends AbstractParser implements Parser {
contentStream = new GZIPInputStream(contentStream);
}
final ByteCountInputStream counterStream = new ByteCountInputStream(contentStream, null);
return sitemapParser.parse(counterStream);
return new SitemapReader(counterStream);
} catch (IOException e) {
throw e;
} finally {
client.finish();
}
}
public static SitemapReader parse(final InputStream stream) throws IOException {
return new SitemapReader(stream);
}
/**
* for schemas see:
* http://www.sitemaps.org/schemas/sitemap/0.9
* http://www.google.com/schemas/sitemap/0.84
*/
public static class SitemapReader extends ArrayList<URLEntry> {
private static final long serialVersionUID = 1337L;
public SitemapReader(final InputStream source) throws IOException {
org.w3c.dom.Document doc;
try { doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(source); }
catch (ParserConfigurationException e) { throw new IOException (e); }
catch (SAXParseException e) { throw new IOException (e); }
catch (SAXException e) { throw new IOException (e); }
catch (OutOfMemoryError e) { throw new IOException (e); }
NodeList sitemapNodes = doc.getElementsByTagName("sitemap");
for (int i = 0; i < sitemapNodes.getLength(); i++) {
String url = new SitemapEntry((Element) sitemapNodes.item(i)).url();
if (url != null && url.length() > 0) {
try {
final SitemapReader r = parse(new DigestURI(url));
for (final URLEntry ue: r) this.add(ue);
} catch (IOException e) {}
}
}
final NodeList urlEntryNodes = doc.getElementsByTagName("url");
for (int i = 0; i < urlEntryNodes.getLength(); i++) {
this.add(new URLEntry((Element) urlEntryNodes.item(i)));
}
public static class SitemapReader extends Thread {
private final InputStream source;
private final BlockingQueue<URLEntry> queue;
public SitemapReader(final InputStream source) {
this.source = source;
this.queue = new ArrayBlockingQueue<URLEntry>(10000);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(300);
for (final URLEntry entry: this) {
sb.append(entry.toString());
public void run() {
try {
org.w3c.dom.Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(this.source);
NodeList sitemapNodes = doc.getElementsByTagName("sitemap");
for (int i = 0; i < sitemapNodes.getLength(); i++) {
String url = new SitemapEntry((Element) sitemapNodes.item(i)).url();
if (url != null && url.length() > 0) {
try {
final SitemapReader r = parse(new DigestURI(url));
r.start();
URLEntry item;
while ((item = r.take()) != POISON_URLEntry) {
try {
this.queue.put(item);
} catch (InterruptedException e) {
break;
}
}
} catch (IOException e) {}
}
}
final NodeList urlEntryNodes = doc.getElementsByTagName("url");
for (int i = 0; i < urlEntryNodes.getLength(); i++) {
try {
this.queue.put(new URLEntry((Element) urlEntryNodes.item(i)));
} catch (InterruptedException e) {
break;
}
}
} catch (Throwable e) {
Log.logException(e);
}
try {
this.queue.put(POISON_URLEntry);
} catch (InterruptedException e) {
}
}
/**
* retrieve the next entry, waiting until one becomes available.
* if no more are available, POISON_URLEntry is returned
* @return the next entry from the sitemap, or POISON_URLEntry if none remain
*/
public URLEntry take() {
try {
return this.queue.take();
} catch (InterruptedException e) {
return POISON_URLEntry;
}
return sb.toString();
}
}
public final static URLEntry POISON_URLEntry = new URLEntry(null);
public static class URLEntry {
public String loc, lastmod, changefreq, priority;
@@ -233,6 +252,7 @@ public class sitemapParser extends AbstractParser implements Parser {
}
private static String val(final Element parent, final String label, final String dflt) {
if (parent == null) return null;
final Element e = (Element) parent.getElementsByTagName(label).item(0);
if (e == null) return dflt;
final Node child = e.getFirstChild();
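The hunk is cut off here. Judging from the org.w3c.dom.CharacterData import kept at the top of the file, the remainder of this helper most likely reads the text of that first child node and otherwise falls back to the default; a hypothetical reconstruction of the whole method:

// hypothetical reconstruction; only the final return is missing from the diff
private static String val(final Element parent, final String label, final String dflt) {
    if (parent == null) return null;
    final Element e = (Element) parent.getElementsByTagName(label).item(0);
    if (e == null) return dflt;
    final Node child = e.getFirstChild();
    return (child instanceof CharacterData) ? ((CharacterData) child).getData() : dflt;
}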