diff --git a/htroot/CrawlStart_p.html b/htroot/CrawlStart_p.html
index bafd8f784..038b2a5a3 100644
--- a/htroot/CrawlStart_p.html
+++ b/htroot/CrawlStart_p.html
@@ -16,7 +16,7 @@
 You can define URLs as start points for Web page crawling and start crawling here. "Crawling" means that YaCy will download the given website, extract all links in it and then download the content behind these links. This is repeated as long as specified under "Crawling Depth".

-[one line of form markup, stripped in extraction]
+[one line of form markup, stripped in extraction]
@@ -31,8 +31,7 @@
 [hunk body stripped in extraction]
@@ -48,7 +47,11 @@
 [hunk body largely stripped in extraction; surviving text: an "Attribut:" label and an "empty" value, with the patch adding a further "empty" entry]
@@ -61,10 +64,14 @@
 Create Bookmark :
-[markup stripped in extraction]
-This option works with "Starting Point: From URL" only!
+[markup stripped in extraction]
+   (works with "Starting Point: From URL" only)
+[new labelled input, markup stripped: the bookmark title field that WatchCrawler_p.java reads as "bookmarkTitle"]
+[new labelled field, markup stripped]
 This option lets you create a bookmark from your crawl start URL. For automatic re-crawling you can use the following default folders:
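
The markup of the added rows is lost above, but the field name they must carry survives in the other two files. A minimal sketch of what the new bookmark-title row plausibly looks like; only the input name "bookmarkTitle" is fixed by WatchCrawler_p.java and IndexCreate.js, the surrounding table markup is an assumption:

<!-- hypothetical reconstruction, not the stripped original markup -->
<tr>
  <td><label for="bookmarkTitle">Title</label>:</td>
  <td><input type="text" id="bookmarkTitle" name="bookmarkTitle" size="50" maxlength="256" /></td>
</tr>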
diff --git a/htroot/WatchCrawler_p.java b/htroot/WatchCrawler_p.java
index 7d361e5b1..a107c77e4 100644
--- a/htroot/WatchCrawler_p.java
+++ b/htroot/WatchCrawler_p.java
@@ -216,7 +216,7 @@ public class WatchCrawler_p {
             if (post.get("createBookmark","off").equals("on")) {
                 bookmarksDB.Bookmark bookmark = sb.bookmarksDB.createBookmark(crawlingStart, "admin");
                 if(bookmark != null){
-                    bookmark.setProperty(bookmarksDB.Bookmark.BOOKMARK_TITLE, crawlingStart);
+                    bookmark.setProperty(bookmarksDB.Bookmark.BOOKMARK_TITLE, post.get("bookmarkTitle", crawlingStart));
                     bookmark.setOwner("admin");
                     bookmark.setPublic(false);
                     bookmark.setTags(tags, true);
diff --git a/htroot/js/IndexCreate.js b/htroot/js/IndexCreate.js
index 8769a4f79..45229fd41 100644
--- a/htroot/js/IndexCreate.js
+++ b/htroot/js/IndexCreate.js
@@ -11,7 +11,8 @@ function handleResponse(){
     if(response.getElementsByTagName("title")[0].firstChild!=null){
         title=response.getElementsByTagName("title")[0].firstChild.nodeValue;
     }
-    document.getElementById("title").innerHTML=title;
+    // document.getElementById("title").innerHTML=title;
+    document.WatchCrawler.bookmarkTitle.value=title;
     // determine if crawling is allowed by the robots.txt
     robotsOK="";
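
Taken together, the three changes pre-fill the bookmark title from the crawled page itself: the AJAX handler in IndexCreate.js copies the fetched <title> into the new form field instead of a display-only element, and WatchCrawler_p.java falls back to the start URL when no title parameter arrives. A self-contained sketch of that wiring; the form name "WatchCrawler" is inferred from document.WatchCrawler in the patch, all other names are assumptions:

<!-- sketch only: everything except the names "WatchCrawler" and
     "bookmarkTitle" is assumed, not taken from the patch -->
<form name="WatchCrawler" action="WatchCrawler_p.html" method="post">
  <input type="text" name="crawlingURL" value="" />
  <input type="checkbox" name="createBookmark" />
  <input type="text" name="bookmarkTitle" value="" />
</form>
<script type="text/javascript">
  // mirrors the patched handleResponse(): extract <title> from the AJAX
  // response for the start URL and pre-fill the bookmark title field;
  // the original reads a global response object, here it is a parameter
  function handleResponse(response) {
    var title = "";
    if (response.getElementsByTagName("title")[0].firstChild != null) {
      title = response.getElementsByTagName("title")[0].firstChild.nodeValue;
    }
    document.WatchCrawler.bookmarkTitle.value = title;
  }
</script>

Note that post.get("bookmarkTitle", crawlingStart) only falls back to the URL when the parameter is absent entirely; a form that always posts the field will produce an empty bookmark title whenever the AJAX lookup finds none.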