yacy_search_server/htroot/rct_p.java
Michael Peter Christen fed26f33a8 enhanced timezone management for indexed data:
To support the new time parser and search functions in YaCy, high-precision
detection of the date and time of day is necessary. This requires detecting
both the time zone of the document content and the time zone of the user
performing a search. The time zone of the search request is determined
automatically from the browser's time zone offset, which is delivered with
the search request invisibly to the user. The time zone of web page content
cannot be detected automatically and must therefore be an attribute of crawl
starts. The advanced crawl start now provides an input field to set the time
zone as an offset in minutes. All parsers must be passed a time zone offset,
which required a change to the parser Java API. Many other changes correct
the previously wrong handling of dates in YaCy, which added a correction
based on the server's time zone. Now no correction is added and all dates in
YaCy are in the UTC/GMT time zone, a normalized time zone for all peers.
2015-04-15 13:17:23 +02:00
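The commit text implies a simple arithmetic convention. As a minimal sketch
(the class and method below are hypothetical illustrations, not part of this
file), a date parsed in the content's local time together with a crawl-start
offset in minutes can be normalized to UTC/GMT like this:

import java.util.Date;

// Hypothetical sketch of the normalization described in the commit
// message: local time = UTC + offset, hence UTC = local time - offset.
final class TimezoneOffsetSketch {
    static Date toUTC(final Date localDate, final int offsetMinutes) {
        // 60000 ms per minute; an offset of +120 (UTC+02:00) shifts back two hours
        return new Date(localDate.getTime() - offsetMinutes * 60000L);
    }
}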

// rct_p.java
// -----------------------
// (C) 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 28.11.2007 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import java.net.MalformedURLException;
import java.util.Date;
import java.util.Iterator;
import net.yacy.cora.document.feed.Hit;
import net.yacy.cora.document.feed.RSSFeed;
import net.yacy.cora.document.id.DigestURL;
import net.yacy.cora.protocol.RequestHeader;
import net.yacy.crawler.retrieval.Request;
import net.yacy.peers.DHTSelection;
import net.yacy.peers.Protocol;
import net.yacy.peers.Seed;
import net.yacy.search.Switchboard;
import net.yacy.server.serverObjects;
import net.yacy.server.serverSwitch;

public class rct_p {

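/**
 * Fetches entries from a remote peer's crawl stack, puts them on the local
 * crawl stacker, and lists all peers that provide remote crawl URLs.
 *
 * @param header the request header (unused here)
 * @param post the request parameters; "retrieve" triggers the transfer and
 *        "peer" holds the hash of the peer to query
 * @param env the server switch, the Switchboard of this peer
 * @return the properties used to fill the servlet template
 */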
public static serverObjects respond(@SuppressWarnings("unused") final RequestHeader header, final serverObjects post, final serverSwitch env) {
// return variable that accumulates replacements
final Switchboard sb = (Switchboard) env;
final serverObjects prop = new serverObjects();
if (post != null) {
if (post.containsKey("retrieve")) {
final String peerhash = post.get("peer", null);
final Seed seed = (peerhash == null) ? null : sb.peers.getConnected(peerhash);
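// ask the remote peer for entries from its remote crawl stack
// (here: at most 20 URLs, with a 60000 ms timeout)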
final RSSFeed feed = (seed == null) ? null : Protocol.queryRemoteCrawlURLs(sb.peers, seed, 20, 60000);
if (feed != null) {
for (final Hit item: feed) {
//System.out.println("URL=" + item.getLink() + ", desc=" + item.getDescription() + ", pubDate=" + item.getPubDate());
// put url on remote crawl stack
DigestURL url;
try {
url = new DigestURL(item.getLink());
} catch (final MalformedURLException e) {
// a malformed link is passed on as null and reported as rejected below
url = null;
}
final Date loaddate = item.getPubDate();
final DigestURL referrer = null; // referrer needed!
final String urlRejectReason = sb.crawlStacker.urlInAcceptedDomain(url);
if (urlRejectReason == null) {
// stack url
if (sb.getLog().isFinest()) sb.getLog().finest("crawlOrder: stack: url='" + url + "'");
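// enqueue a crawl request; the last argument hands the remote
// profile's time zone offset (in minutes) through to the parsers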
sb.crawlStacker.enqueueEntry(new Request(
peerhash.getBytes(),
url,
(referrer == null) ? null : referrer.hash(),
"REMOTE-CRAWLING",
loaddate,
sb.crawler.defaultRemoteProfile.handle(),
0,
sb.crawler.defaultRemoteProfile.timezoneOffset()));
} else {
env.getLog().warn("crawlOrder: Rejected URL '" + urlToString(url) + "': " + urlRejectReason);
}
}
}
}
}
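// fill the host list for the servlet page, even if no retrieval was requested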
listHosts(sb, prop);
// return rewrite properties
return prop;
}

/**
 * @param url the url to render, may be null
 * @return the url in normal form, or the string "null" if the url is null
 */
private static String urlToString(final DigestURL url) {
return (url == null ? "null" : url.toNormalform(true));
}
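
/**
 * Write all connected peers that provide remote crawl URLs into the given
 * properties as hosts_n_hosthash / hosts_n_hostname entries plus a hosts count.
 *
 * @param sb the Switchboard holding the seed database
 * @param prop the servlet properties to fill
 */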
private static void listHosts(final Switchboard sb, final serverObjects prop) {
// list known hosts
int hc = 0;
if (sb.peers != null && sb.peers.sizeConnected() > 0) {
final Iterator<Seed> e = DHTSelection.getProvidesRemoteCrawlURLs(sb.peers);
while (e.hasNext()) {
final Seed seed = e.next();
if (seed != null) {
prop.put("hosts_" + hc + "_hosthash", seed.hash);
prop.putHTML("hosts_" + hc + "_hostname", seed.hash + " " + seed.get(Seed.NAME, "nameless") + " (" + seed.getLong(Seed.RCOUNT, 0) + ")");
hc++;
}
}
prop.put("hosts", hc);
} else {
prop.put("hosts", "0");
}
}
}