yacy_search_server/htroot/QuickCrawlLink_p.java
orbiter 70dd26ec95 added the new crawl scheduling function to the crawl start menu:
- the scheduler extends the options for re-crawl timing. Many people misunderstood the re-crawl timing feature because it was only a criterion for the url double-check and not a scheduler (see the sketch after this list). Now the scheduler setting is combined with the re-crawl setting, and people can choose between no re-crawl, re-crawl as it was possible so far, and a scheduled re-crawl. The 'classic' re-crawl time is set automatically when the scheduling function is selected
- removed the bookmark-based scheduler. This scheduler could not transport all attributes of a crawl start and therefore did not support special crawl starts, i.e. for forums and wikis
- since the old scheduler was not able to crawl special forums and wikis, the must-not-match filter was statically fixed to exclude all bad pages for these special use cases. Since the new scheduler can handle these filters, the default settings for the filters can be removed
- removed the busy thread that was used to trigger the bookmark-based scheduler
- removed the crontab for the bookmark-based scheduler
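
A minimal sketch of the url double-check mentioned in the first point (illustrative only; the method name and the Date-based signature are assumptions, not the actual YaCy API):

// hypothetical helper: a url that is already known to the index passes the
// double-check only if its last load is older than the re-crawl limit
static boolean acceptForRecrawl(final java.util.Date lastLoad, final long recrawlIfOlderMinutes) {
    final long ageMinutes = (System.currentTimeMillis() - lastLoad.getTime()) / 60000L;
    return ageMinutes > recrawlIfOlderMinutes; // otherwise the url is rejected as a double
}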

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@7051 6c8d7289-2bf4-0310-a012-ef5d649a1542
2010-08-19 23:52:38 +00:00

// QuickCrawlLink_p.java
// -----------------------
// part of the AnomicHTTPD caching proxy
// (C) by Michael Peter Christen; mc@yacy.net
// first published on http://www.anomic.de
// Frankfurt, Germany, 2004
//
// This file was contributed by Martin Thelian
// $LastChangedDate$
// $LastChangedBy$
// $LastChangedRevision$
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// You must compile this file with
// javac -classpath .:../classes QuickCrawlLink_p.java
// if the shell's current path is HTROOT

import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URLDecoder;
import java.util.Date;

import net.yacy.kelondro.data.meta.DigestURI;
import net.yacy.kelondro.logging.Log;

import de.anomic.crawler.CrawlProfile;
import de.anomic.crawler.retrieval.Request;
import de.anomic.http.server.HeaderFramework;
import de.anomic.http.server.RequestHeader;
import de.anomic.search.Segment;
import de.anomic.search.Segments;
import de.anomic.search.Switchboard;
import de.anomic.server.serverObjects;
import de.anomic.server.serverSwitch;

public class QuickCrawlLink_p {

    /**
     * Example Javascript to call this servlet:
     * <code>javascript:w = window.open('http://user:pwd@localhost:8080/QuickCrawlLink_p.html?indexText=on&indexMedia=on&crawlingQ=on&xdstopw=on&title=' + escape(document.title) + '&url=' + location.href,'_blank','height=150,width=500,resizable=yes,scrollbar=no,directory=no,menubar=no,location=no'); w.focus();</code>
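     * <p>
     * For testing, the servlet can also be called directly over HTTP; a hypothetical
     * example (host, credentials and target url are placeholders, adjust them to
     * your installation):
     * <code>curl "http://admin:pwd@localhost:8080/QuickCrawlLink_p.html?url=http%3A%2F%2Fexample.org%2F&indexText=on"</code>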
     *
     * @param header the complete HTTP header of the request
     * @param post any arguments for this servlet, the request carried with (GET as well as POST)
     * @param env the serverSwitch object holding all runtime-data
     * @return the rewrite-properties for the template
     */
    public static serverObjects respond(final RequestHeader header, final serverObjects post, final serverSwitch env) {
        final serverObjects prop = new serverObjects();
        final Switchboard sb = (Switchboard) env;

        // get the index segment; fall back to the default segment if no (or an
        // unknown) segment name was given, so that indexSegment is never null
        Segment indexSegment = sb.indexSegments.segment(Segments.Process.PUBLIC);
        if (post != null && post.containsKey("segment")) {
            final String segmentName = post.get("segment");
            if (sb.indexSegments.segmentExist(segmentName)) {
                indexSegment = sb.indexSegments.segment(segmentName);
            }
        }

        if (post == null) {
            // send back a usage example
            prop.put("mode", "0");

            // parse the port from the http host header, if present
            final String hostSocket = header.get(HeaderFramework.CONNECTION_PROP_HOST);
            int port = 80;
            if (hostSocket != null) {
                final int pos = hostSocket.indexOf(":");
                if (pos != -1) {
                    port = Integer.parseInt(hostSocket.substring(pos + 1));
                }
            }

            prop.put("mode_host", "localhost");
            prop.put("mode_port", port);

            return prop;
        }
        prop.put("mode", "1");
        // get the URL; the parameter may be missing, so guard against null
        String crawlingStart = post.get("url", null);
        if (crawlingStart != null) {
            try {
                crawlingStart = URLDecoder.decode(crawlingStart, "UTF-8");
            } catch (final UnsupportedEncodingException e) {
                Log.logException(e);
            }
        }
        // get the browser title
        final String title = post.get("title", null);

        // get other parameters if set
        final String crawlingMustMatch = post.get("mustmatch", CrawlProfile.MATCH_ALL);
        final String crawlingMustNotMatch = post.get("mustnotmatch", CrawlProfile.MATCH_NEVER);
        final int crawlingDepth = Integer.parseInt(post.get("crawlingDepth", "0"));
        final boolean crawlDynamic = post.get("crawlingQ", "").equals("on");
        final boolean indexText = post.get("indexText", "on").equals("on");
        final boolean indexMedia = post.get("indexMedia", "on").equals("on");
        final boolean storeHTCache = post.get("storeHTCache", "").equals("on");
        final boolean remoteIndexing = post.get("crawlOrder", "").equals("on");
        final boolean xsstopw = post.get("xsstopw", "").equals("on");
        final boolean xdstopw = post.get("xdstopw", "").equals("on");
        final boolean xpstopw = post.get("xpstopw", "").equals("on");

        prop.put("mode_url", (crawlingStart == null) ? "unknown" : crawlingStart);
        prop.putHTML("mode_title", (title == null) ? "unknown" : title);
        if (crawlingStart != null) {
            crawlingStart = crawlingStart.trim();
            try {
                crawlingStart = new DigestURI(crawlingStart, null).toNormalform(true, true);
            } catch (final MalformedURLException e1) {}

            // check if the url is proper
            DigestURI crawlingStartURL = null;
            try {
                crawlingStartURL = new DigestURI(crawlingStart, null);
            } catch (final MalformedURLException e) {
                prop.put("mode_status", "1");
                prop.put("mode_code", "1");
                return prop;
            }
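            // remove any previous traces of this url from the index and the
            // crawl queues so that it is fetched and indexed again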
            final byte[] urlhash = crawlingStartURL.hash();
            indexSegment.urlMetadata().remove(urlhash);
            sb.crawlQueues.noticeURL.removeByURLHash(urlhash);
            sb.crawlQueues.errorURL.remove(urlhash);
            // create a crawling profile
            CrawlProfile.entry pe = null;
            try {
                pe = sb.crawler.profilesActiveCrawls.newEntry(
                        crawlingStartURL.getHost(),
                        crawlingStartURL,
                        crawlingMustMatch,
                        crawlingMustNotMatch,
                        crawlingDepth,
                        60 * 24 * 30, // recrawlIfOlder (minutes); here: one month
                        -1,           // domFilterDepth, if negative: no auto-filter
                        -1,           // domMaxPages, if negative: no count restriction
                        crawlDynamic,
                        indexText,
                        indexMedia,
                        storeHTCache,
                        true,
                        remoteIndexing,
                        xsstopw,
                        xdstopw,
                        xpstopw,
                        CrawlProfile.CacheStrategy.IFFRESH);
            } catch (final Exception e) {
                // the crawl profile could not be created
                prop.put("mode_status", "2"); // error with url
                prop.put("mode_code", "2");
                prop.putHTML("mode_status_error", e.getMessage());
                return prop;
            }
            // stack the url for crawling
            final String reasonString = sb.crawlStacker.stackCrawl(new Request(
                    sb.peers.mySeed().hash.getBytes(),
                    crawlingStartURL,
                    null,
                    (title == null) ? "CRAWLING-ROOT" : title,
                    new Date(),
                    pe.handle(),
                    0,
                    0,
                    0));

            // a null reason means the url was accepted for crawling
            if (reasonString == null) {
                prop.put("mode_status", "0"); // crawl started
                prop.put("mode_code", "0");
            } else {
                prop.put("mode_status", "3"); // url rejected
                prop.put("mode_code", "3");
                prop.putHTML("mode_status_error", reasonString);
            }
        }

        return prop;
    }
}