yacy_search_server/source/de/anomic/crawler/retrieval/Response.java
orbiter a563b05b60 enhanced crawler:
- added a new queue 'noload' which can be filled with urls for which it is already known that the content cannot be loaded. This may be because there is no parser available or the file is too large
- the noload queue is emptied by the parser process, which indexes only the file names
- the 'start from file' functionality now also reads from the ftp crawler

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@7368 6c8d7289-2bf4-0310-a012-ef5d649a1542
2010-12-11 00:31:57 +00:00

// Response.java
// (C) 2008 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 19.08.2008 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.crawler.retrieval;
import java.io.ByteArrayInputStream;
import java.util.Date;
import net.yacy.cora.protocol.HeaderFramework;
import net.yacy.cora.protocol.RequestHeader;
import net.yacy.cora.protocol.ResponseHeader;
import net.yacy.document.Classification;
import net.yacy.document.Document;
import net.yacy.document.Parser;
import net.yacy.document.TextParser;
import net.yacy.kelondro.data.meta.DigestURI;
import net.yacy.kelondro.util.DateFormatter;
import de.anomic.crawler.CrawlProfile;
public class Response {
// doctypes:
public static final char DT_PDFPS = 'p';
public static final char DT_TEXT = 't';
public static final char DT_HTML = 'h';
public static final char DT_DOC = 'd';
public static final char DT_IMAGE = 'i';
public static final char DT_MOVIE = 'm';
public static final char DT_FLASH = 'f';
public static final char DT_SHARE = 's';
public static final char DT_AUDIO = 'a';
public static final char DT_BINARY = 'b';
public static final char DT_UNKNOWN = 'u';
// the class objects
private final Request request;
private final RequestHeader requestHeader;
private final ResponseHeader responseHeader;
private final String responseStatus;
private final CrawlProfile profile;
private byte[] content;
private int status; // tracker indexing status, see status defs below
// doctype calculation
public static char docType(final DigestURI url) {
final String path = url.getPath().toLowerCase();
// serverLog.logFinest("PLASMA", "docType URL=" + path);
char doctype = DT_UNKNOWN;
if (path.endsWith(".gif")) { doctype = DT_IMAGE; }
else if (path.endsWith(".ico")) { doctype = DT_IMAGE; }
else if (path.endsWith(".bmp")) { doctype = DT_IMAGE; }
else if (path.endsWith(".jpg")) { doctype = DT_IMAGE; }
else if (path.endsWith(".jpeg")) { doctype = DT_IMAGE; }
else if (path.endsWith(".png")) { doctype = DT_IMAGE; }
else if (path.endsWith(".html")) { doctype = DT_HTML; }
else if (path.endsWith(".txt")) { doctype = DT_TEXT; }
else if (path.endsWith(".doc")) { doctype = DT_DOC; }
else if (path.endsWith(".rtf")) { doctype = DT_DOC; }
else if (path.endsWith(".pdf")) { doctype = DT_PDFPS; }
else if (path.endsWith(".ps")) { doctype = DT_PDFPS; }
else if (path.endsWith(".avi")) { doctype = DT_MOVIE; }
else if (path.endsWith(".mov")) { doctype = DT_MOVIE; }
else if (path.endsWith(".qt")) { doctype = DT_MOVIE; }
else if (path.endsWith(".mpg")) { doctype = DT_MOVIE; }
else if (path.endsWith(".md5")) { doctype = DT_SHARE; }
else if (path.endsWith(".mpeg")) { doctype = DT_MOVIE; }
else if (path.endsWith(".asf")) { doctype = DT_FLASH; }
return doctype;
}
public static char docType(final String mime) {
// serverLog.logFinest("PLASMA", "docType mime=" + mime);
char doctype = DT_UNKNOWN;
if (mime == null) doctype = DT_UNKNOWN;
else if (mime.startsWith("image/")) doctype = DT_IMAGE;
else if (mime.endsWith("/gif")) doctype = DT_IMAGE;
else if (mime.endsWith("/jpeg")) doctype = DT_IMAGE;
else if (mime.endsWith("/png")) doctype = DT_IMAGE;
else if (mime.endsWith("/html")) doctype = DT_HTML;
else if (mime.endsWith("/rtf")) doctype = DT_DOC;
else if (mime.endsWith("/pdf")) doctype = DT_PDFPS;
else if (mime.endsWith("/octet-stream")) doctype = DT_BINARY;
else if (mime.endsWith("/x-shockwave-flash")) doctype = DT_FLASH;
else if (mime.endsWith("/msword")) doctype = DT_DOC;
else if (mime.endsWith("/mspowerpoint")) doctype = DT_DOC;
else if (mime.endsWith("/postscript")) doctype = DT_PDFPS;
else if (mime.startsWith("text/")) doctype = DT_TEXT;
else if (mime.startsWith("audio/")) doctype = DT_AUDIO;
else if (mime.startsWith("video/")) doctype = DT_MOVIE;
//bz2 = application/x-bzip2
//dvi = application/x-dvi
//gz = application/gzip
//hqx = application/mac-binhex40
//lha = application/x-lzh
//lzh = application/x-lzh
//pac = application/x-ns-proxy-autoconfig
//php = application/x-httpd-php
//phtml = application/x-httpd-php
//rss = application/xml
//tar = application/tar
//tex = application/x-tex
//tgz = application/tar
//torrent = application/x-bittorrent
//xhtml = application/xhtml+xml
//xla = application/msexcel
//xls = application/msexcel
//xsl = application/xml
//xml = application/xml
//Z = application/x-compress
//zip = application/zip
return doctype;
}
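// Illustrative sketch (not part of the original code): how the two docType()
// overloads resolve a document type, using hypothetical inputs:
//   docType("application/pdf")                          -> DT_PDFPS  (matches "/pdf")
//   docType("text/plain")                                -> DT_TEXT   (matches "text/")
//   docType(new DigestURI("http://example.net/a.png"))   -> DT_IMAGE  (matches ".png")
// The mime-based overload is tried first by the instance method docType() below;
// the URL path is only consulted when the mime type yields DT_UNKNOWN.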
public static final int QUEUE_STATE_FRESH = 0;
public static final int QUEUE_STATE_PARSING = 1;
public static final int QUEUE_STATE_CONDENSING = 2;
public static final int QUEUE_STATE_STRUCTUREANALYSIS = 3;
public static final int QUEUE_STATE_INDEXSTORAGE = 4;
public static final int QUEUE_STATE_FINISHED = 5;
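// Sketch of the intended lifecycle (an assumption drawn from the constant names,
// not from the original documentation): a Response starts as QUEUE_STATE_FRESH,
// and a hypothetical indexing pipeline would advance it via updateStatus(), e.g.
//   response.updateStatus(Response.QUEUE_STATE_PARSING);
//   response.updateStatus(Response.QUEUE_STATE_CONDENSING);
//   ...
//   response.updateStatus(Response.QUEUE_STATE_FINISHED);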
public Response(
final Request request,
final RequestHeader requestHeader,
final ResponseHeader responseHeader,
final String responseStatus,
final CrawlProfile profile,
final byte[] content) {
this.request = request;
// request and response headers may be null when we process surrogates
this.requestHeader = requestHeader;
this.responseHeader = responseHeader;
this.responseStatus = responseStatus;
this.profile = profile;
this.status = QUEUE_STATE_FRESH;
this.content = content;
}
public Response(final Request request, final CrawlProfile profile) {
this.request = request;
// request and response headers may be null when we process surrogates
this.requestHeader = new RequestHeader();
this.responseHeader = new ResponseHeader();
this.responseStatus = "200";
this.profile = profile;
this.status = QUEUE_STATE_FRESH;
this.content = request.url().toNormalform(true, true).getBytes();
}
public Response(
final Request request,
final RequestHeader requestHeader,
final ResponseHeader responseHeader,
final String responseStatus,
final CrawlProfile profile) {
this(request, requestHeader, responseHeader, responseStatus, profile, null);
}
public void updateStatus(final int newStatus) {
this.status = newStatus;
}
public ResponseHeader getResponseHeader() {
return this.responseHeader;
}
public int getStatus() {
return this.status;
}
public String name() {
// the anchor name; can be either the text inside the anchor tag or the
// page description after loading of the page
return this.request.name();
}
public DigestURI url() {
return this.request.url();
}
public char docType() {
char doctype = docType(getMimeType());
if (doctype == DT_UNKNOWN) doctype = docType(url());
return doctype;
}
public Date lastModified() {
Date docDate = null;
if (responseHeader != null) {
docDate = responseHeader.lastModified();
if (docDate == null) docDate = responseHeader.date();
}
if (docDate == null && request != null) docDate = request.appdate();
if (docDate == null) docDate = new Date(DateFormatter.correctedUTCTime());
return docDate;
}
public String language() {
// please avoid this method if a condenser document is available, because the condenser has a built-in language detection
// this here is only a guess using the TLD
return this.url().language();
}
public CrawlProfile profile() {
return this.profile;
}
public byte[] initiator() {
return this.request.initiator();
}
public boolean proxy() {
return initiator() == null;
}
public long size() {
if (this.content != null) return this.content.length;
if (this.responseHeader != null) {
// take the size from the response header
return this.responseHeader.getContentLength();
}
// the size is unknown
return -1;
}
public int depth() {
return this.request.depth();
}
public void setContent(final byte[] data) {
this.content = data;
}
public byte[] getContent() {
return this.content;
}
// the following three methods for cache read/write granting shall be as loose
// as possible but also as strict as necessary to enable caching of most items
/**
* @return null if the answer is 'yes' (may be cached); otherwise the reason
*         for rejecting cache storage is returned as a String
*/
public String shallStoreCacheForProxy() {
String crawlerReason = shallStoreCacheForCrawler();
if (crawlerReason != null) return crawlerReason;
// check profile (disabled: we will check this in the plasmaSwitchboard)
// if (!this.profile.storeHTCache()) { return "storage_not_wanted"; }
// decide upon header information if a specific file should be stored to
// the cache or not
// if the storage was requested by prefetching, the request map is null
// -CGI access in request
// CGI access makes the page very individual, and therefore not usable
// in caches
if (this.url().isPOST() && this.profile != null && !this.profile.crawlingQ()) {
return "dynamic_post";
}
if (this.url().isCGI()) {
return "dynamic_cgi";
}
if (this.url().isLocal()) {
return "local_URL_no_cache_needed";
}
if (responseHeader != null) {
// -if-modified-since in request
// we do not care about if-modified-since, because this case only occurs if the
// cache file does not exist, and we need as much info as possible for the indexing
// -cookies in request
// we do not care about cookies, because that would prevent loading more pages
// from one domain once a request resulted in a client-side stored cookie
// -set-cookie in response
// we do not care about cookies in responses, because that info comes along
// with many pages from a server and does not express the validity of the page
// in modes of life-time/expiration or individuality
// -pragma in response
// if we have a pragma no-cache, we don't cache. usually, if this is wanted by
// the server, it makes sense
String cacheControl = responseHeader.get(HeaderFramework.PRAGMA);
if (cacheControl != null && cacheControl.trim().toUpperCase().equals("NO-CACHE")) { return "controlled_no_cache"; }
// -expires in response
// we do not care about expires, because at the time this is called the data is
// obviously valid, and that header info is used in the indexing later on
// -cache-control in response
// the cache-control has many value options.
cacheControl = responseHeader.get(HeaderFramework.CACHE_CONTROL);
if (cacheControl != null) {
cacheControl = cacheControl.trim().toUpperCase();
if (cacheControl.startsWith("MAX-AGE=")) {
// we also need the load date
final Date date = responseHeader.date();
if (date == null) return "stale_no_date_given_in_response";
try {
final long ttl = 1000 * Long.parseLong(cacheControl.substring(8)); // milliseconds to live
if (DateFormatter.correctedUTCTime() - date.getTime() > ttl) {
//System.out.println("***not indexed because cache-control");
return "stale_expired";
}
} catch (final Exception e) {
return "stale_error_" + e.getMessage() + ")";
}
}
}
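// Worked example for the MAX-AGE branch above (numbers are illustrative only):
// with "Cache-Control: max-age=3600" the ttl becomes 3,600,000 ms; if the
// response Date header is more than one hour older than the corrected UTC time,
// the entry is reported as "stale_expired" and not cached.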
}
return null;
}
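// Hypothetical caller sketch (not taken from the original sources): the
// null-means-yes convention of the shallStoreCache* methods is typically
// consumed like this:
//   final String rejectReason = response.shallStoreCacheForProxy();
//   if (rejectReason == null) { /* write the entry to the cache */ }
//   else { /* log rejectReason and skip caching */ }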
public String shallStoreCacheForCrawler() {
// check storage size: all files are handled in RAM before storage, so they must not exceed
// a given size, which we consider to be 10 MB
if (this.size() > 10 * 1024L * 1024L) return "too_large_for_caching_" + this.size();
// check status code
if (!validResponseStatus()) {
return "bad_status_" + this.responseStatus;
}
if (requestHeader != null) {
// -authorization cases in request
// authorization makes pages very individual, and therefore we cannot use the
// content in the cache
if (requestHeader.containsKey(RequestHeader.AUTHORIZATION)) { return "personalized"; }
// -ranges in request and response
// we do not cache partial content
if (requestHeader.containsKey(HeaderFramework.RANGE)) { return "partial_request"; }
}
if (responseHeader != null) {
// -ranges in request and response
// we do not cache partial content
if (responseHeader.containsKey(HeaderFramework.CONTENT_RANGE)) { return "partial_response"; }
}
return null;
}
/**
* decide upon header information whether a specific file should be taken from
* the cache or not
*
* @return whether the file should be taken from the cache
*/
public boolean isFreshForProxy() {
// -CGI access in request
// CGI access makes the page very individual, and therefore not usable
// in caches
if (this.url().isPOST()) {
return false;
}
if (this.url().isCGI()) {
return false;
}
String cacheControl;
if (requestHeader != null) {
// -authorization cases in request
if (requestHeader.containsKey(RequestHeader.AUTHORIZATION)) { return false; }
// -ranges in request
// we do not cache partial content
if (requestHeader.containsKey(HeaderFramework.RANGE)) { return false; }
// if the client requests an un-cached copy of the resource ...
cacheControl = requestHeader.get(HeaderFramework.PRAGMA);
if (cacheControl != null && cacheControl.trim().toUpperCase().equals("NO-CACHE")) { return false; }
cacheControl = requestHeader.get(HeaderFramework.CACHE_CONTROL);
if (cacheControl != null) {
cacheControl = cacheControl.trim().toUpperCase();
if (cacheControl.startsWith("NO-CACHE") || cacheControl.startsWith("MAX-AGE=0")) { return false; }
}
// -if-modified-since in request
// The entity has to be transferred only if it has
// been modified since the date given by the If-Modified-Since header.
if (requestHeader.containsKey(RequestHeader.IF_MODIFIED_SINCE)) {
// checking this only makes sense if the cached response contains
// a Last-Modified field. If the field does not exist, we go the safe way
if (!responseHeader.containsKey(HeaderFramework.LAST_MODIFIED)) { return false; }
// parse date
Date d1, d2;
d2 = responseHeader.lastModified(); if (d2 == null) { d2 = new Date(DateFormatter.correctedUTCTime()); }
d1 = requestHeader.ifModifiedSince(); if (d1 == null) { d1 = new Date(DateFormatter.correctedUTCTime()); }
// finally, we shall treat the cache as stale if the modification time is after the If-Modified-Since time
if (d2.after(d1)) { return false; }
}
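// Illustrative example for the If-Modified-Since check above (dates are made up):
// if the client sends "If-Modified-Since: 2010-11-01" and the cached response
// carries "Last-Modified: 2010-12-01", the resource changed after the client's
// date (d2.after(d1)), so the cached copy is not treated as fresh.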
final String mimeType = getMimeType();
if (!Classification.isPictureMime(mimeType)) {
// -cookies in request
// unfortunately, we should reload in case of a cookie
// but we think that pictures can still be considered as fresh
// -set-cookie in cached response
// this is a similar case as for COOKIE.
if (requestHeader.containsKey(RequestHeader.COOKIE) ||
responseHeader.containsKey(HeaderFramework.SET_COOKIE) ||
responseHeader.containsKey(HeaderFramework.SET_COOKIE2)) {
return false; // too strong
}
}
}
if (responseHeader != null) {
// -pragma in cached response
// logically, we would not need to care about no-cache pragmas in cached response headers,
// because they cannot exist since they are not written to the cache.
// So this IF should always fail..
cacheControl = responseHeader.get(HeaderFramework.PRAGMA);
if (cacheControl != null && cacheControl.trim().toUpperCase().equals("NO-CACHE")) { return false; }
// see for documentation also:
// http://www.web-caching.com/cacheability.html
// http://vancouver-webpages.com/CacheNow/
// look for freshness information
// if we don't have any freshness indication, we treat the file as stale.
// no handle for freshness control:
// -expires in cached response
// the expires value gives us a very easy hint when the cache is stale
final Date expires = responseHeader.expires();
if (expires != null) {
// System.out.println("EXPIRES-TEST: expires=" + expires + ", NOW=" + serverDate.correctedGMTDate() + ", url=" + url);
if (expires.before(new Date(DateFormatter.correctedUTCTime()))) { return false; }
}
final Date lastModified = responseHeader.lastModified();
cacheControl = responseHeader.get(HeaderFramework.CACHE_CONTROL);
if (cacheControl == null && lastModified == null && expires == null) { return false; }
// -lastModified in cached response
// we can apply a TTL (Time To Live) heuristic here. We call the time delta between the last read
// of the file and the last-modified date the age of the file. If we considered the file as
// middle-aged, the maximum TTL would be cache-creation time plus age, which is a TTL factor of 100%.
// We want no more than a 10% TTL factor, so that a 10 month old cache
// file may only be treated as fresh for one more month, not longer.
Date date = responseHeader.date();
if (lastModified != null) {
if (date == null) { date = new Date(DateFormatter.correctedUTCTime()); }
final long age = date.getTime() - lastModified.getTime();
if (age < 0) { return false; }
// TTL (Time-To-Live) is age/10 = (date.getTime() - lastModified.getTime()) / 10
// the actual living-time is DateFormatter.correctedUTCTime() - date.getTime()
// therefore the cache is stale if DateFormatter.correctedUTCTime() - date.getTime() > age/10
if (DateFormatter.correctedUTCTime() - date.getTime() > age / 10) { return false; }
}
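// Worked example for the age heuristic above (illustrative numbers): if the
// Date header is 2010-12-01 and Last-Modified is 2010-11-01, the age is 30 days,
// so the derived TTL is 3 days; the cached copy is treated as stale as soon as
// more than 3 days have passed since the Date header.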
// -cache-control in cached response
// the cache-control has many value options.
if (cacheControl != null) {
cacheControl = cacheControl.trim().toUpperCase();
if (cacheControl.startsWith("PRIVATE") ||
cacheControl.startsWith("NO-CACHE") ||
cacheControl.startsWith("NO-STORE")) {
// easy case
return false;
// } else if (cacheControl.startsWith("PUBLIC")) {
// // ok, do nothing
} else if (cacheControl.startsWith("MAX-AGE=")) {
// we also need the load date
if (date == null) { return false; }
try {
final long ttl = 1000 * Long.parseLong(cacheControl.substring(8)); // milliseconds to live
if (DateFormatter.correctedUTCTime() - date.getTime() > ttl) {
return false;
}
} catch (final Exception e) {
return false;
}
}
}
}
return true;
}
/**
* decide upon header information whether a specific file should be indexed;
* this method returns null if the answer is 'YES'!
* if the answer is 'NO' (do not index), it returns a String with the reason
* for rejecting the indexing demand in clear text
*
* This function is used by plasmaSwitchboard#processResourceStack
*/
public final String shallIndexCacheForProxy() {
if (profile() == null) {
return "shallIndexCacheForProxy: profile() is null !";
}
// check profile
if (!profile().indexText() && !profile().indexMedia()) {
return "indexing not allowed - indexText and indexMedia not set (for proxy = " + profile.name()+ ")";
}
// -CGI access in request
// CGI access makes the page very individual, and therefore not usable in caches
if (!profile().crawlingQ()) {
if (url().isPOST()) {
return "Dynamic_(POST)";
}
if (url().isCGI()) {
return "Dynamic_(CGI)";
}
}
// -authorization cases in request
// we checked that in shallStoreCache
// -ranges in request
// we checked that in shallStoreCache
// a picture cannot be indexed
/*
if (Classification.isMediaExtension(url().getFileExtension())) {
return "Media_Content_(forbidden)";
}
*/
// -cookies in request
// unfortunately, we cannot index pages which have been requested with a cookie
// because the returned content may be special for the client
if (requestWithCookie()) {
// System.out.println("***not indexed because cookie");
return "Dynamic_(Requested_With_Cookie)";
}
if (responseHeader != null) {
// -set-cookie in response
// the set-cookie from the server does not indicate that the content is special
// thus we do not care about it here for indexing
// a picture cannot be indexed
final String mimeType = responseHeader.mime();
/*
if (Classification.isPictureMime(mimeType)) {
return "Media_Content_(Picture)";
}
*/
String parserError = TextParser.supportsMime(mimeType);
if (parserError != null) {
return "Media_Content, no parser: " + parserError;
}
// -if-modified-since in request
// if the page is fresh at the very moment we can index it
final Date ifModifiedSince = this.requestHeader.ifModifiedSince();
if ((ifModifiedSince != null) && (responseHeader.containsKey(HeaderFramework.LAST_MODIFIED))) {
// parse date
Date d = responseHeader.lastModified();
if (d == null) {
d = new Date(DateFormatter.correctedUTCTime());
}
// finally, we shall treat the cache as stale if the modification time is after the If-Modified-Since time
if (d.after(ifModifiedSince)) {
//System.out.println("***not indexed because if-modified-since");
return "Stale_(Last-Modified>Modified-Since)";
}
}
// -pragma in cached response
if (responseHeader.containsKey(HeaderFramework.PRAGMA) &&
(responseHeader.get(HeaderFramework.PRAGMA)).toUpperCase().equals("NO-CACHE")) {
return "Denied_(pragma_no_cache)";
}
// see for documentation also:
// http://www.web-caching.com/cacheability.html
// look for freshness information
// -expires in cached response
// the expires value gives us a very easy hint when the cache is stale
// sometimes, the expires date is set to the past to prevent a page from being cached
// we use that information to see if we should index it
final Date expires = responseHeader.expires();
if (expires != null && expires.before(new Date(DateFormatter.correctedUTCTime()))) {
return "Stale_(Expired)";
}
// -lastModified in cached response
// this information is too weak to use it to prevent indexing
// even if we can apply a TTL heuristic for cache usage
// -cache-control in cached response
// the cache-control has many value options.
String cacheControl = responseHeader.get(HeaderFramework.CACHE_CONTROL);
if (cacheControl != null) {
cacheControl = cacheControl.trim().toUpperCase();
/* we have the following cases for cache-control:
"public" -- can be indexed
"private", "no-cache", "no-store" -- cannot be indexed
"max-age=<delta-seconds>" -- stale/fresh dependent on date
*/
if (cacheControl.startsWith("PRIVATE") ||
cacheControl.startsWith("NO-CACHE") ||
cacheControl.startsWith("NO-STORE")) {
// easy case
return "Stale_(denied_by_cache-control=" + cacheControl + ")";
// } else if (cacheControl.startsWith("PUBLIC")) {
// // ok, do nothing
} else if (cacheControl.startsWith("MAX-AGE=")) {
// we also need the load date
final Date date = responseHeader.date();
if (date == null) {
return "Stale_(no_date_given_in_response)";
}
try {
final long ttl = 1000 * Long.parseLong(cacheControl.substring(8)); // milliseconds to live
if (DateFormatter.correctedUTCTime() - date.getTime() > ttl) {
//System.out.println("***not indexed because cache-control");
return "Stale_(expired_by_cache-control)";
}
} catch (final Exception e) {
return "Error_(" + e.getMessage() + ")";
}
}
}
}
return null;
}
/**
* decide upon header information whether a specific file should be indexed;
* this method returns null if the answer is 'YES'!
* if the answer is 'NO' (do not index), it returns a String with the reason
* for rejecting the indexing demand in clear text
*
* This function is used by plasmaSwitchboard#processResourceStack
*/
public final String shallIndexCacheForCrawler() {
if (profile() == null) {
return "shallIndexCacheForCrawler: profile() is null !";
}
// check profile
if (!profile().indexText() && !profile().indexMedia()) {
return "indexing not allowed - indexText and indexMedia not set (for crawler = " + profile.name() + ")";
}
// -CGI access in request
// CGI access makes the page very individual, and therefore not usable in caches
if (!profile().crawlingQ()) {
if (url().isPOST()) { return "Dynamic_(POST)"; }
if (url().isCGI()) { return "Dynamic_(CGI)"; }
}
// -authorization cases in request
// we checked that in shallStoreCache
// -ranges in request
// we checked that in shallStoreCache
// check if document can be indexed
if (responseHeader != null) {
final String mimeType = responseHeader.mime();
String parserError = TextParser.supportsMime(mimeType);
if (parserError != null && TextParser.supportsExtension(url()) != null) return "no parser available: " + parserError;
}
/*
if (Classification.isMediaExtension(url().getFileExtension()) &&
!Classification.isImageExtension((url().getFileExtension()))) {
return "Media_Content_(forbidden)";
}
*/
// -if-modified-since in request
// if the page is fresh at the very moment we can index it
// -> this does not apply for the crawler
// -cookies in request
// unfortunately, we cannot index pages which have been requested with a cookie
// because the returned content may be special for the client
// -> this does not apply for a crawler
// -set-cookie in response
// the set-cookie from the server does not indicate that the content is special
// thus we do not care about it here for indexing
// -> this does not apply for a crawler
// -pragma in cached response
// -> in the crawler we ignore this
// look for freshness information
// -expires in cached response
// the expires value gives us a very easy hint when the cache is stale
// sometimes, the expires date is set to the past to prevent a page from being cached
// we use that information to see if we should index it
// -> this does not apply for a crawler
// -lastModified in cached response
// this information is too weak to use it to prevent indexing
// even if we can apply a TTL heuristic for cache usage
// -cache-control in cached response
// the cache-control has many value options.
// -> in the crawler we ignore this
return null;
}
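// Hypothetical caller sketch (not from the original sources); both
// shallIndexCacheForProxy() and shallIndexCacheForCrawler() follow the same
// null-means-yes convention documented above:
//   final String rejectReason = response.shallIndexCacheForCrawler();
//   if (rejectReason != null) { /* do not index; record rejectReason */ }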
public String getMimeType() {
if (responseHeader == null) return null;
String mimeType = responseHeader.mime();
mimeType = mimeType.trim().toLowerCase();
final int pos = mimeType.indexOf(';');
return ((pos < 0) ? mimeType : mimeType.substring(0, pos));
}
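// Illustrative example (assuming responseHeader.mime() returns the raw
// Content-Type value): "text/html; charset=UTF-8" is trimmed, lower-cased and
// cut at the ';', so getMimeType() yields "text/html".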
public String getCharacterEncoding() {
if (responseHeader == null) return null;
return responseHeader.getCharacterEncoding();
}
public DigestURI referrerURL() {
if (requestHeader == null) return null;
try {
String r = requestHeader.get(RequestHeader.REFERER, null);
if (r == null) return null;
return new DigestURI(r);
} catch (final Exception e) {
return null;
}
}
public byte[] referrerHash() {
if (requestHeader == null) return null;
String u = requestHeader.get(RequestHeader.REFERER, "");
if (u == null || u.length() == 0) return null;
try {
return new DigestURI(u).hash();
} catch (final Exception e) {
return null;
}
}
public boolean validResponseStatus() {
return (responseStatus == null) ? false : responseStatus.startsWith("200") || responseStatus.startsWith("203");
}
public Date ifModifiedSince() {
return (requestHeader == null) ? null : requestHeader.ifModifiedSince();
}
public boolean requestWithCookie() {
return (requestHeader == null) ? false : requestHeader.containsKey(RequestHeader.COOKIE);
}
public boolean requestProhibitsIndexing() {
return (requestHeader == null)
? false
: requestHeader.containsKey(HeaderFramework.X_YACY_INDEX_CONTROL) &&
(requestHeader.get(HeaderFramework.X_YACY_INDEX_CONTROL)).toUpperCase().equals("NO-INDEX");
}
public EventOrigin processCase(String mySeedHash) {
// we must distinguish the following cases: resource-load was initiated by
// 1) global crawling: the index is extern, not here (not possible here)
// 2) result of search queries, some indexes are here (not possible here)
// 3) result of index transfer, some of them are here (not possible here)
// 4) proxy-load (initiator is "------------")
// 5) local prefetch/crawling (initiator is own seedHash)
// 6) local fetching for global crawling (other known or unknown initiator)
EventOrigin processCase = EventOrigin.UNKNOWN;
// FIXME the equals seems to be incorrect: String.equals(boolean)
if ((initiator() == null) || initiator().length == 0 || new String(initiator()).equals("------------")) {
// proxy-load
processCase = EventOrigin.PROXY_LOAD;
} else if (new String(initiator()).equals(mySeedHash)) {
// normal crawling
processCase = EventOrigin.LOCAL_CRAWLING;
} else {
// this was done for remote peer (a global crawl)
processCase = EventOrigin.GLOBAL_CRAWLING;
}
return processCase;
}
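// Illustrative mapping of the cases above (initiator values are hypothetical):
//   initiator null, empty, or "------------"  -> EventOrigin.PROXY_LOAD
//   initiator equals mySeedHash               -> EventOrigin.LOCAL_CRAWLING
//   any other initiator                       -> EventOrigin.GLOBAL_CRAWLING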
public Document[] parse() throws Parser.Failure {
String supportError = TextParser.supports(url(), this.responseHeader == null ? null : this.responseHeader.mime());
if (supportError != null) throw new Parser.Failure("no parser support: " + supportError, url());
try {
return TextParser.parseSource(url(), this.responseHeader == null ? null : this.responseHeader.mime(), this.responseHeader == null ? "UTF-8" : this.responseHeader.getCharacterEncoding(), this.content.length, new ByteArrayInputStream(this.content));
} catch (Exception e) {
return null;
}
}
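// Hypothetical usage sketch (not part of the original sources):
//   try {
//       final Document[] docs = response.parse();
//       if (docs != null) { /* hand the documents to the indexer */ }
//   } catch (final Parser.Failure e) {
//       /* no parser supports the mime type or file extension */
//   }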
}