yacy_search_server/source/de/anomic/plasma/parser/zip/zipParser.java
theli f17ce28b6d *) plasmaHTCache:
   - method loadResourceContent is now marked as deprecated.
     Please do not use this function; it can cause OutOfMemory exceptions
     when loading large files.
   - new function getResourceContentStream to get an input stream for a cached file
   - new function getResourceContentLength to get the size of a cached file
     (see the usage sketch below)
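
   A minimal usage sketch of the two new accessors (hedged: the exact
   plasmaHTCache signatures below are assumptions inferred from this message,
   not verified against the class):

     import java.io.InputStream;
     import de.anomic.net.URL;
     import de.anomic.plasma.plasmaHTCache;

     final class CachedResourceExample {
         // hypothetical helper; the plasmaHTCache instance and URL come from the caller
         static InputStream openCachedResource(plasmaHTCache cache, URL url) throws Exception {
             // deprecated: byte[] content = cache.loadResourceContent(url);
             long size = cache.getResourceContentLength(url);  // size of the cached file
             if (size <= 0) return null;                       // nothing usable in the cache
             return cache.getResourceContentStream(url);       // stream instead of a full in-memory copy
         }
     }
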
*) httpc.java:
   - Bugfix: resource content was loaded into memory even if this was not requested
*) Crawler:
   - new option to hold loaded resource content in memory
   - new option to use the worker class without the worker pool 
     (needed by the snippet fetcher)
*) plasmaSnippetCache:
   - the snippet loader no longer uses a crawl-worker from the pool but a
     newly created instance, to avoid being blocked by normal crawling
     activity
   - now operates on streams instead of byte arrays to avoid OutOfMemory
     exceptions when operating on large files
   - the snippet loader now forces the crawl-worker to keep the loaded
     resource in memory to avoid IO
*) plasmaCondenser: new function getWords that can operate directly on input streams
*) Parsers:
   - keep the resource in memory whenever possible (to avoid IO)
   - when parsing from a stream, the content length must now be passed to the
     parser function. The parsers need this length value to decide whether the
     parsed resource content is too large to hold in memory and must therefore
     be written to a file (see the sketch below)
   - AbstractParser.java: new function to pass the contentLength of a resource to the parsers
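
   A minimal sketch of the in-memory vs. temp-file decision described above,
   mirroring the pattern used by zipParser.parse() in the file below;
   Parser.MAX_KEEP_IN_MEMORY_SIZE and serverByteBuffer are taken from that code,
   the helper class itself is only illustrative:

     import java.io.BufferedOutputStream;
     import java.io.File;
     import java.io.FileOutputStream;
     import java.io.IOException;
     import java.io.OutputStream;
     import de.anomic.plasma.parser.Parser;
     import de.anomic.server.serverByteBuffer;

     final class ContentSinkExample {
         static OutputStream chooseSink(long contentLength) throws IOException {
             // unknown (-1) or large content is written to a temporary file to avoid OutOfMemory
             if ((contentLength == -1) || (contentLength > Parser.MAX_KEEP_IN_MEMORY_SIZE)) {
                 File tmp = File.createTempFile("parserContent", ".tmp");
                 return new BufferedOutputStream(new FileOutputStream(tmp));
             }
             // small content can safely be kept in memory
             return new serverByteBuffer();
         }
     }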
   


git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@2701 6c8d7289-2bf4-0310-a012-ef5d649a1542
2006-10-03 11:05:48 +00:00

242 lines
10 KiB
Java

//zipParser.java
//------------------------
//part of YaCy
//(C) by Michael Peter Christen; mc@anomic.de
//first published on http://www.anomic.de
//Frankfurt, Germany, 2005
//
//this file was contributed by Martin Thelian
//last major change: 16.05.2005
//
//This program is free software; you can redistribute it and/or modify
//it under the terms of the GNU General Public License as published by
//the Free Software Foundation; either version 2 of the License, or
//(at your option) any later version.
//
//This program is distributed in the hope that it will be useful,
//but WITHOUT ANY WARRANTY; without even the implied warranty of
//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//GNU General Public License for more details.
//
//You should have received a copy of the GNU General Public License
//along with this program; if not, write to the Free Software
//Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
//Using this software in any meaning (reading, learning, copying, compiling,
//running) means that you agree that the Author(s) is (are) not responsible
//for cost, loss of data or any harm that may be caused directly or indirectly
//by usage of this software or this documentation. The usage of this software
//is on your own risk. The installation and usage (starting/running) of this
//software may allow other people or application to access your computer and
//any attached devices and is highly dependent on the configuration of the
//software which must be done by the user of the software; the author(s) is
//(are) also not responsible for proper configuration and usage of the
//software, even if provoked by documentation provided together with
//the software.
//
//Any changes to this file according to the GPL as documented in the file
//gpl.txt aside this file in the shipment you received can be done to the
//lines that follow this copyright notice here, but changes must not be
//done inside the copyright notice above. A re-distribution must contain
//the intact and unchanged copyright notice.
//Contributions and changes to the program code must be marked as such.
package de.anomic.plasma.parser.zip;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.Map;
import java.util.TreeSet;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import de.anomic.net.URL;
import de.anomic.plasma.plasmaParser;
import de.anomic.plasma.plasmaParserDocument;
import de.anomic.plasma.parser.AbstractParser;
import de.anomic.plasma.parser.Parser;
import de.anomic.plasma.parser.ParserException;
import de.anomic.server.serverByteBuffer;
import de.anomic.server.serverFileUtils;

public class zipParser extends AbstractParser implements Parser {

    /**
     * a list of mime types that are supported by this parser class
     * @see #getSupportedMimeTypes()
     */
    public static final Hashtable SUPPORTED_MIME_TYPES = new Hashtable();
    static {
        SUPPORTED_MIME_TYPES.put("application/zip","zip");
        SUPPORTED_MIME_TYPES.put("application/x-zip","zip");
        SUPPORTED_MIME_TYPES.put("application/x-zip-compressed","zip");
        SUPPORTED_MIME_TYPES.put("application/java-archive","jar");
    }

    /**
     * a list of library names that are needed by this parser
     * @see Parser#getLibxDependences()
     */
    private static final String[] LIBX_DEPENDENCIES = new String[] {};

    public zipParser() {
        super(LIBX_DEPENDENCIES);
        this.parserName = "Compressed Archive File Parser";
    }

    public Hashtable getSupportedMimeTypes() {
        return SUPPORTED_MIME_TYPES;
    }
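
    /**
     * Parses a zip archive by unpacking each entry into a temporary file,
     * handing it to a newly created plasmaParser, and merging the resulting
     * sub-documents into a single plasmaParserDocument.
     */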
    public plasmaParserDocument parse(URL location, String mimeType, String charset, InputStream source) throws ParserException, InterruptedException {
        long docTextLength = 0;
        OutputStream docText = null;
        File outputFile = null;
        plasmaParserDocument subDoc = null;
        try {
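            // decide where to collect the merged document text: if the content length
            // is unknown or exceeds Parser.MAX_KEEP_IN_MEMORY_SIZE the text is written
            // to a temporary file, otherwise it is kept in an in-memory buffer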
            if ((this.contentLength == -1) || (this.contentLength > Parser.MAX_KEEP_IN_MEMORY_SIZE)) {
                outputFile = File.createTempFile("zipParser",".tmp");
                docText = new BufferedOutputStream(new FileOutputStream(outputFile));
            } else {
                docText = new serverByteBuffer();
            }

            StringBuffer docKeywords = new StringBuffer();
            StringBuffer docShortTitle = new StringBuffer();
            StringBuffer docLongTitle = new StringBuffer();
            LinkedList docSections = new LinkedList();
            StringBuffer docAbstrct = new StringBuffer();
            Map docAnchors = new HashMap();
            TreeSet docImages = new TreeSet();

            // creating a new parser class to parse the unzipped content
            plasmaParser theParser = new plasmaParser();

            // looping through the contained files
            ZipEntry entry;
            ZipInputStream zippedContent = new ZipInputStream(source);
            while ((entry = zippedContent.getNextEntry()) != null) {
                // check for interruption
                checkInterruption();

                // skip directories
                if (entry.isDirectory()) continue;

                // Get the entry name
                String entryName = entry.getName();
                int idx = entryName.lastIndexOf(".");
                // getting the file extension
                String entryExt = (idx > -1) ? entryName.substring(idx+1) : "";

                // trying to determine the mimeType per file extension
                String entryMime = plasmaParser.getMimeTypeByFileExt(entryExt);

                // parsing the content
                File subDocTempFile = null;
                try {
                    // create the temp file
                    subDocTempFile = createTempFile(entryName);

                    // copy the data into the file
                    serverFileUtils.copy(zippedContent,subDocTempFile,entry.getSize());

                    // parsing the zip file entry
                    subDoc = theParser.parseSource(new URL(location,"#" + entryName),entryMime,null, subDocTempFile);
                } catch (ParserException e) {
                    this.theLogger.logInfo("Unable to parse zip file entry '" + entryName + "'. " + e.getMessage());
                } finally {
                    if (subDocTempFile != null) try { subDocTempFile.delete(); } catch (Exception ex) {/* ignore this */}
                }
                if (subDoc == null) continue;

                // merging all documents together
                if (docKeywords.length() > 0) docKeywords.append(",");
                docKeywords.append(subDoc.getKeywords(','));

                if (docLongTitle.length() > 0) docLongTitle.append("\n");
                docLongTitle.append(subDoc.getMainLongTitle());

                if (docShortTitle.length() > 0) docShortTitle.append("\n");
                docShortTitle.append(subDoc.getMainShortTitle());

                docSections.addAll(Arrays.asList(subDoc.getSectionTitles()));

                if (docAbstrct.length() > 0) docAbstrct.append("\n");
                docAbstrct.append(subDoc.getAbstract());

                if (subDoc.getTextLength() > 0) {
                    if (docTextLength > 0) docText.write('\n');
                    docTextLength += serverFileUtils.copy(subDoc.getText(), docText);
                }

                docAnchors.putAll(subDoc.getAnchors());
                docImages.addAll(subDoc.getImages());

                // release subdocument
                subDoc.close();
                subDoc = null;
            }
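
            // assemble the merged document; depending on where the text was collected,
            // either the in-memory buffer or the temporary file is handed to the
            // plasmaParserDocument constructor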
            plasmaParserDocument result = null;
            if (docText instanceof serverByteBuffer) {
                result = new plasmaParserDocument(
                        location,
                        mimeType,
                        null,
                        docKeywords.toString().split(" |,"),
                        docShortTitle.toString(),
                        docLongTitle.toString(),
                        (String[]) docSections.toArray(new String[docSections.size()]),
                        docAbstrct.toString(),
                        ((serverByteBuffer) docText).toByteArray(),
                        docAnchors,
                        docImages);
            } else {
                result = new plasmaParserDocument(
                        location,
                        mimeType,
                        null,
                        docKeywords.toString().split(" |,"),
                        docShortTitle.toString(),
                        docLongTitle.toString(),
                        (String[]) docSections.toArray(new String[docSections.size()]),
                        docAbstrct.toString(),
                        outputFile,
                        docAnchors,
                        docImages);
            }
            return result;
        } catch (Exception e) {
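            // pass interruptions and parser errors through unchanged; for any other
            // error release the partial sub-document, close the text sink, remove the
            // temporary file and wrap the cause into a ParserException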
            if (e instanceof InterruptedException) throw (InterruptedException) e;
            if (e instanceof ParserException) throw (ParserException) e;

            if (subDoc != null) subDoc.close();

            // close the writer
            if (docText != null) try { docText.close(); } catch (Exception ex) {/* ignore this */}

            // delete the file
            if (outputFile != null) try { outputFile.delete(); } catch (Exception ex) {/* ignore this */}

            throw new ParserException("Unexpected error while parsing zip resource. " + e.getClass().getName() + ": "+ e.getMessage(),location);
        }
    }

    public void reset() {
        // Nothing to do here at the moment
        super.reset();
    }
}