2011-11-16 16:27:43 +01:00
// HTTPLoader.java
2010-03-11 16:43:06 +01:00
// ---------------
// (C) by Michael Peter Christen; mc@yacy.net
// first published on http://yacy.net
// Frankfurt, Germany, 2006
2005-09-07 15:18:34 +02:00
//
2009-09-23 23:26:14 +02:00
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
2005-04-21 12:31:40 +02:00
//
2010-03-11 16:43:06 +01:00
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
2005-04-21 12:31:40 +02:00
//
2010-03-11 16:43:06 +01:00
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
2005-04-21 12:31:40 +02:00
//
2010-03-11 16:43:06 +01:00
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
2005-04-21 12:31:40 +02:00
2012-09-21 15:48:16 +02:00
package net.yacy.crawler.retrieval ;
2005-04-21 12:31:40 +02:00
import java.io.IOException ;
2006-09-04 08:09:20 +02:00
2013-09-15 00:30:23 +02:00
import net.yacy.cora.document.id.DigestURL ;
2013-09-17 15:27:02 +02:00
import net.yacy.cora.federate.solr.FailCategory ;
2011-04-26 15:35:29 +02:00
import net.yacy.cora.protocol.ClientIdentification ;
2010-08-23 14:32:02 +02:00
import net.yacy.cora.protocol.HeaderFramework ;
import net.yacy.cora.protocol.RequestHeader ;
import net.yacy.cora.protocol.ResponseHeader ;
2010-08-23 00:32:39 +02:00
import net.yacy.cora.protocol.http.HTTPClient ;
2013-07-09 14:28:25 +02:00
import net.yacy.cora.util.ConcurrentLog ;
2012-09-21 15:48:16 +02:00
import net.yacy.crawler.data.CrawlProfile ;
import net.yacy.crawler.data.Latency ;
2010-09-11 17:58:15 +02:00
import net.yacy.kelondro.io.ByteCount ;
2012-06-11 00:17:30 +02:00
import net.yacy.repository.Blacklist.BlacklistType ;
2011-09-25 18:59:06 +02:00
import net.yacy.search.Switchboard ;
2012-05-15 12:25:46 +02:00
import net.yacy.search.SwitchboardConstants ;
2012-09-21 15:48:16 +02:00
import net.yacy.server.http.AlternativeDomainNames ;
2005-04-21 12:31:40 +02:00
2008-05-06 02:32:41 +02:00
public final class HTTPLoader {
2005-11-04 14:41:51 +01:00
2008-05-24 13:04:44 +02:00
private static final String DEFAULT_ENCODING = " gzip,deflate " ;
private static final String DEFAULT_LANGUAGE = " en-us,en;q=0.5 " ;
private static final String DEFAULT_CHARSET = " ISO-8859-1,utf-8;q=0.7,*;q=0.7 " ;
2011-11-16 16:36:30 +01:00
public static final String DEFAULT_ACCEPT = " text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 " ;
2011-08-02 01:28:23 +02:00
public static final int DEFAULT_MAXFILESIZE = 1024 * 1024 * 10 ;
2008-05-24 13:04:44 +02:00
public static final int DEFAULT_CRAWLING_RETRY_COUNT = 5 ;
2011-11-16 16:27:43 +01:00
2006-09-04 11:00:18 +02:00
/ * *
* The socket timeout that should be used
* /
2008-08-02 14:12:04 +02:00
private final int socketTimeout ;
2009-07-19 22:37:44 +02:00
private final Switchboard sb ;
2013-07-09 14:28:25 +02:00
private final ConcurrentLog log ;
2011-11-16 16:27:43 +01:00
2013-07-09 14:28:25 +02:00
/**
 * Creates a loader bound to the given switchboard.
 *
 * @param sb     the switchboard providing configuration and crawl infrastructure
 * @param theLog logger used for crawler messages
 */
public HTTPLoader(final Switchboard sb, final ConcurrentLog theLog) {
    this.sb = sb;
    this.log = theLog;

    // refreshing timeout value
    // NOTE(review): config key had artifact spaces (" crawler.clientTimeout ")
    // which would never match the real key; restored to the exact key name.
    this.socketTimeout = (int) sb.getConfigLong("crawler.clientTimeout", 30000);
}
2013-08-22 14:23:47 +02:00
public Response load ( final Request entry , CrawlProfile profile , final int maxFileSize , final BlacklistType blacklistType , final ClientIdentification . Agent agent ) throws IOException {
2012-12-07 15:49:23 +01:00
Latency . updateBeforeLoad ( entry . url ( ) ) ;
2011-11-16 16:27:43 +01:00
final long start = System . currentTimeMillis ( ) ;
2013-08-22 14:23:47 +02:00
final Response doc = load ( entry , profile , DEFAULT_CRAWLING_RETRY_COUNT , maxFileSize , blacklistType , agent ) ;
2012-12-07 15:49:23 +01:00
Latency . updateAfterLoad ( entry . url ( ) , System . currentTimeMillis ( ) - start ) ;
2009-03-20 11:21:23 +01:00
return doc ;
2007-10-29 02:43:20 +01:00
}
2011-11-16 16:27:43 +01:00
2013-08-22 14:23:47 +02:00
private Response load ( final Request request , CrawlProfile profile , final int retryCount , final int maxFileSize , final BlacklistType blacklistType , final ClientIdentification . Agent agent ) throws IOException {
2005-11-04 14:41:51 +01:00
2007-10-29 02:43:20 +01:00
if ( retryCount < 0 ) {
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . TEMPORARY_NETWORK_FAILURE , " retry counter exceeded " , - 1 ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " retry counter exceeded for URL " + request . url ( ) . toString ( ) + " . Processing aborted.$ " ) ;
2007-10-29 02:43:20 +01:00
}
2011-11-16 16:27:43 +01:00
2013-09-15 00:30:23 +02:00
DigestURL url = request . url ( ) ;
2011-11-16 16:27:43 +01:00
2010-11-22 20:12:51 +01:00
final String host = url . getHost ( ) ;
2009-09-22 16:39:06 +02:00
if ( host = = null | | host . length ( ) < 2 ) throw new IOException ( " host is not well-formed: ' " + host + " ' " ) ;
2010-11-22 20:12:51 +01:00
final String path = url . getFile ( ) ;
int port = url . getPort ( ) ;
final boolean ssl = url . getProtocol ( ) . equals ( " https " ) ;
2005-06-02 03:33:10 +02:00
if ( port < 0 ) port = ( ssl ) ? 443 : 80 ;
2011-11-16 16:27:43 +01:00
2005-09-02 14:09:45 +02:00
// check if url is in blacklist
2008-08-02 14:12:04 +02:00
final String hostlow = host . toLowerCase ( ) ;
2012-07-02 13:57:29 +02:00
if ( blacklistType ! = null & & Switchboard . urlBlacklist . isListed ( blacklistType , hostlow , path ) ) {
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . FINAL_LOAD_CONTEXT , " url in blacklist " , - 1 ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " CRAWLER Rejecting URL ' " + request . url ( ) . toString ( ) + " '. URL is in blacklist.$ " ) ;
2005-11-04 14:41:51 +01:00
}
2011-11-16 16:27:43 +01:00
2010-11-22 20:12:51 +01:00
// resolve yacy and yacyh domains
2014-03-06 03:08:04 +01:00
final AlternativeDomainNames yacyResolver = this . sb . peers ;
2010-11-22 20:12:51 +01:00
if ( yacyResolver ! = null ) {
2011-11-16 16:27:43 +01:00
final String yAddress = yacyResolver . resolve ( host ) ;
2010-11-22 20:12:51 +01:00
if ( yAddress ! = null ) {
2013-09-15 00:30:23 +02:00
url = new DigestURL ( url . getProtocol ( ) + " :// " + yAddress + path ) ;
2010-11-22 20:12:51 +01:00
}
}
2011-11-16 16:27:43 +01:00
2005-06-02 03:33:10 +02:00
// take a file from the net
2009-07-19 23:59:29 +02:00
Response response = null ;
2011-11-16 16:27:43 +01:00
2009-07-19 23:59:29 +02:00
// create a request header
final RequestHeader requestHeader = new RequestHeader ( ) ;
2013-08-22 14:23:47 +02:00
requestHeader . put ( HeaderFramework . USER_AGENT , agent . userAgent ) ;
2013-09-15 00:30:23 +02:00
DigestURL refererURL = null ;
2012-06-28 14:27:29 +02:00
if ( request . referrerhash ( ) ! = null ) refererURL = this . sb . getURL ( request . referrerhash ( ) ) ;
2012-10-10 11:46:22 +02:00
if ( refererURL ! = null ) requestHeader . put ( RequestHeader . REFERER , refererURL . toNormalform ( true ) ) ;
2011-11-16 16:27:43 +01:00
requestHeader . put ( HeaderFramework . ACCEPT , this . sb . getConfig ( " crawler.http.accept " , DEFAULT_ACCEPT ) ) ;
requestHeader . put ( HeaderFramework . ACCEPT_LANGUAGE , this . sb . getConfig ( " crawler.http.acceptLanguage " , DEFAULT_LANGUAGE ) ) ;
requestHeader . put ( HeaderFramework . ACCEPT_CHARSET , this . sb . getConfig ( " crawler.http.acceptCharset " , DEFAULT_CHARSET ) ) ;
requestHeader . put ( HeaderFramework . ACCEPT_ENCODING , this . sb . getConfig ( " crawler.http.acceptEncoding " , DEFAULT_ENCODING ) ) ;
2005-11-04 14:41:51 +01:00
2009-07-19 23:59:29 +02:00
// HTTP-Client
2013-08-22 14:23:47 +02:00
final HTTPClient client = new HTTPClient ( agent ) ;
2010-11-21 23:46:12 +01:00
client . setRedirecting ( false ) ; // we want to handle redirection ourselves, so we don't index pages twice
2011-11-16 16:27:43 +01:00
client . setTimout ( this . socketTimeout ) ;
2010-07-27 03:16:26 +02:00
client . setHeader ( requestHeader . entrySet ( ) ) ;
2009-07-13 21:55:13 +02:00
2012-05-15 12:25:46 +02:00
// send request
2014-01-19 15:21:23 +01:00
final byte [ ] responseBody = client . GETbytes ( url , sb . getConfig ( SwitchboardConstants . ADMIN_ACCOUNT_USER_NAME , " admin " ) , sb . getConfig ( SwitchboardConstants . ADMIN_ACCOUNT_B64MD5 , " " ) , maxFileSize , false ) ;
2012-06-26 13:54:48 +02:00
final int statusCode = client . getHttpResponse ( ) . getStatusLine ( ) . getStatusCode ( ) ;
final ResponseHeader responseHeader = new ResponseHeader ( statusCode , client . getHttpResponse ( ) . getAllHeaders ( ) ) ;
2012-10-10 11:46:22 +02:00
String requestURLString = request . url ( ) . toNormalform ( true ) ;
2012-05-15 12:25:46 +02:00
2012-06-26 13:54:48 +02:00
// check redirection
if ( statusCode > 299 & & statusCode < 310 ) {
2011-11-16 16:27:43 +01:00
2012-06-26 13:54:48 +02:00
// read redirection URL
String redirectionUrlString = responseHeader . get ( HeaderFramework . LOCATION ) ;
redirectionUrlString = redirectionUrlString = = null ? " " : redirectionUrlString . trim ( ) ;
2012-07-10 22:59:03 +02:00
if ( redirectionUrlString . isEmpty ( ) ) {
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . TEMPORARY_NETWORK_FAILURE , " no redirection url provided, field ' " + HeaderFramework . LOCATION + " ' is empty " , statusCode ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " REJECTED EMTPY REDIRECTION ' " + client . getHttpResponse ( ) . getStatusLine ( ) + " ' for URL ' " + requestURLString + " '$ " ) ;
2012-06-26 13:54:48 +02:00
}
2009-07-13 21:55:13 +02:00
2012-06-26 13:54:48 +02:00
// normalize URL
2013-09-15 00:30:23 +02:00
final DigestURL redirectionUrl = DigestURL . newURL ( request . url ( ) , redirectionUrlString ) ;
2009-07-13 21:55:13 +02:00
2012-06-26 13:54:48 +02:00
// restart crawling with new url
2013-07-09 14:28:25 +02:00
this . log . info ( " CRAWLER Redirection detected (' " + client . getHttpResponse ( ) . getStatusLine ( ) + " ') for URL " + requestURLString ) ;
this . log . info ( " CRAWLER ..Redirecting request to: " + redirectionUrl ) ;
2012-06-26 13:54:48 +02:00
2012-10-23 02:50:26 +02:00
this . sb . webStructure . generateCitationReference ( url , redirectionUrl ) ;
2012-06-26 13:54:48 +02:00
if ( this . sb . getConfigBool ( SwitchboardConstants . CRAWLER_RECORD_REDIRECTS , true ) ) {
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . FINAL_REDIRECT_RULE , " redirect to " + redirectionUrlString , statusCode ) ;
2012-06-26 13:54:48 +02:00
}
if ( this . sb . getConfigBool ( SwitchboardConstants . CRAWLER_FOLLOW_REDIRECTS , true ) ) {
2013-05-17 13:59:37 +02:00
// if we are already doing a shutdown we don't need to retry crawling
if ( Thread . currentThread ( ) . isInterrupted ( ) ) {
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . FINAL_LOAD_CONTEXT , " server shutdown " , statusCode ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " CRAWLER Retry of URL= " + requestURLString + " aborted because of server shutdown.$ " ) ;
2013-05-17 13:59:37 +02:00
}
// retry crawling with new url
request . redirectURL ( redirectionUrl ) ;
2013-08-22 14:23:47 +02:00
return load ( request , profile , retryCount - 1 , maxFileSize , blacklistType , agent ) ;
2012-05-15 12:25:46 +02:00
}
2012-07-05 08:44:39 +02:00
// we don't want to follow redirects
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . FINAL_PROCESS_CONTEXT , " redirection not wanted " , statusCode ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " REJECTED UNWANTED REDIRECTION ' " + client . getHttpResponse ( ) . getStatusLine ( ) + " ' for URL ' " + requestURLString + " '$ " ) ;
2012-05-15 12:25:46 +02:00
} else if ( responseBody = = null ) {
// no response, reject file
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . TEMPORARY_NETWORK_FAILURE , " no response body " , statusCode ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " REJECTED EMPTY RESPONSE BODY ' " + client . getHttpResponse ( ) . getStatusLine ( ) + " ' for URL ' " + requestURLString + " '$ " ) ;
2012-06-26 13:54:48 +02:00
} else if ( statusCode = = 200 | | statusCode = = 203 ) {
2012-05-15 12:25:46 +02:00
// the transfer is ok
// we write the new cache entry to file system directly
final long contentLength = responseBody . length ;
ByteCount . addAccountCount ( ByteCount . CRAWLER , contentLength ) ;
// check length again in case it was not possible to get the length before loading
2012-12-10 21:17:45 +01:00
if ( maxFileSize > = 0 & & contentLength > maxFileSize ) {
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . FINAL_PROCESS_CONTEXT , " file size limit exceeded " , statusCode ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " REJECTED URL " + request . url ( ) + " because file size ' " + contentLength + " ' exceeds max filesize limit of " + maxFileSize + " bytes. (GET)$ " ) ;
2009-04-13 23:21:47 +02:00
}
2012-05-15 12:25:46 +02:00
// create a new cache entry
response = new Response (
request ,
requestHeader ,
2012-06-25 18:17:31 +02:00
responseHeader ,
2012-05-15 12:25:46 +02:00
profile ,
2012-05-21 03:03:47 +02:00
false ,
2012-05-15 12:25:46 +02:00
responseBody
) ;
return response ;
} else {
// if the response has not the right response type then reject file
2014-04-17 13:21:43 +02:00
this . sb . crawlQueues . errorURL . push ( request . url ( ) , request . depth ( ) , profile , FailCategory . TEMPORARY_NETWORK_FAILURE , " wrong http status code " , statusCode ) ;
2014-04-10 09:08:59 +02:00
throw new IOException ( " REJECTED WRONG STATUS TYPE ' " + client . getHttpResponse ( ) . getStatusLine ( ) + " ' for URL ' " + requestURLString + " '$ " ) ;
2012-05-15 12:25:46 +02:00
}
2005-06-02 03:33:10 +02:00
}
2011-11-16 16:27:43 +01:00
2013-08-22 14:23:47 +02:00
public static Response load ( final Request request , ClientIdentification . Agent agent ) throws IOException {
return load ( request , agent , 3 ) ;
2009-10-01 00:11:00 +02:00
}
2011-11-16 16:27:43 +01:00
2013-08-22 14:23:47 +02:00
private static Response load ( final Request request , ClientIdentification . Agent agent , final int retryCount ) throws IOException {
2009-10-01 00:11:00 +02:00
if ( retryCount < 0 ) {
throw new IOException ( " Redirection counter exceeded for URL " + request . url ( ) . toString ( ) + " . Processing aborted. " ) ;
}
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
final String host = request . url ( ) . getHost ( ) ;
if ( host = = null | | host . length ( ) < 2 ) throw new IOException ( " host is not well-formed: ' " + host + " ' " ) ;
final String path = request . url ( ) . getFile ( ) ;
int port = request . url ( ) . getPort ( ) ;
final boolean ssl = request . url ( ) . getProtocol ( ) . equals ( " https " ) ;
if ( port < 0 ) port = ( ssl ) ? 443 : 80 ;
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
// check if url is in blacklist
final String hostlow = host . toLowerCase ( ) ;
2012-06-11 00:17:30 +02:00
if ( Switchboard . urlBlacklist ! = null & & Switchboard . urlBlacklist . isListed ( BlacklistType . CRAWLER , hostlow , path ) ) {
2009-10-01 00:11:00 +02:00
throw new IOException ( " CRAWLER Rejecting URL ' " + request . url ( ) . toString ( ) + " '. URL is in blacklist. " ) ;
}
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
// take a file from the net
Response response = null ;
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
// create a request header
final RequestHeader requestHeader = new RequestHeader ( ) ;
2013-08-22 14:23:47 +02:00
requestHeader . put ( HeaderFramework . USER_AGENT , agent . userAgent ) ;
2009-10-01 00:11:00 +02:00
requestHeader . put ( HeaderFramework . ACCEPT_LANGUAGE , DEFAULT_LANGUAGE ) ;
requestHeader . put ( HeaderFramework . ACCEPT_CHARSET , DEFAULT_CHARSET ) ;
requestHeader . put ( HeaderFramework . ACCEPT_ENCODING , DEFAULT_ENCODING ) ;
2013-08-22 14:23:47 +02:00
final HTTPClient client = new HTTPClient ( agent ) ;
2010-07-27 03:16:26 +02:00
client . setTimout ( 20000 ) ;
client . setHeader ( requestHeader . entrySet ( ) ) ;
2014-01-19 15:21:23 +01:00
final byte [ ] responseBody = client . GETbytes ( request . url ( ) , null , null , false ) ;
2012-06-25 18:17:31 +02:00
final int code = client . getHttpResponse ( ) . getStatusLine ( ) . getStatusCode ( ) ;
final ResponseHeader header = new ResponseHeader ( code , client . getHttpResponse ( ) . getAllHeaders ( ) ) ;
2009-10-01 00:11:00 +02:00
// FIXME: 30*-handling (bottom) is never reached
// we always get the final content because httpClient.followRedirects = true
2010-07-27 03:16:26 +02:00
if ( responseBody ! = null & & ( code = = 200 | | code = = 203 ) ) {
2009-10-01 00:11:00 +02:00
// the transfer is ok
2011-11-16 16:27:43 +01:00
2010-09-11 17:58:15 +02:00
//statistics:
ByteCount . addAccountCount ( ByteCount . CRAWLER , responseBody . length ) ;
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
// we write the new cache entry to file system directly
// create a new cache entry
response = new Response (
request ,
requestHeader ,
2011-11-16 16:27:43 +01:00
header ,
2009-10-01 00:11:00 +02:00
null ,
2012-05-21 03:03:47 +02:00
false ,
2009-10-01 00:11:00 +02:00
responseBody
) ;
return response ;
2010-07-27 03:16:26 +02:00
} else if ( code > 299 & & code < 310 ) {
if ( header . containsKey ( HeaderFramework . LOCATION ) ) {
2009-10-01 00:11:00 +02:00
// getting redirection URL
2010-07-27 03:16:26 +02:00
String redirectionUrlString = header . get ( HeaderFramework . LOCATION ) ;
2009-10-01 00:11:00 +02:00
redirectionUrlString = redirectionUrlString . trim ( ) ;
2012-07-10 22:59:03 +02:00
if ( redirectionUrlString . isEmpty ( ) ) {
2009-10-01 00:11:00 +02:00
throw new IOException ( " CRAWLER Redirection of URL= " + request . url ( ) . toString ( ) + " aborted. Location header is empty. " ) ;
}
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
// normalizing URL
2013-09-15 00:30:23 +02:00
final DigestURL redirectionUrl = DigestURL . newURL ( request . url ( ) , redirectionUrlString ) ;
2009-10-01 00:11:00 +02:00
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
// if we are already doing a shutdown we don't need to retry crawling
if ( Thread . currentThread ( ) . isInterrupted ( ) ) {
throw new IOException ( " CRAWLER Retry of URL= " + request . url ( ) . toString ( ) + " aborted because of server shutdown. " ) ;
}
2011-11-16 16:27:43 +01:00
2009-10-01 00:11:00 +02:00
// retry crawling with new url
request . redirectURL ( redirectionUrl ) ;
2013-08-22 14:23:47 +02:00
return load ( request , agent , retryCount - 1 ) ;
2009-10-01 00:11:00 +02:00
}
} else {
// if the response has not the right response type then reject file
2010-07-27 03:16:26 +02:00
throw new IOException ( " REJECTED WRONG STATUS TYPE ' " + client . getHttpResponse ( ) . getStatusLine ( ) + " ' for URL " + request . url ( ) . toString ( ) ) ;
2009-10-01 00:11:00 +02:00
}
return response ;
}
2011-11-16 16:27:43 +01:00
2007-06-07 17:26:41 +02:00
}