Myhentaicomics ripper now rips all pages from tags and searches (#540)

* Now starts download right away when downloading from tags and searches
* Now rips from searches/tags ASAP
This commit is contained in:
cyian-1756 2017-05-23 12:59:49 -04:00 committed by metaprime
parent 89df4d7812
commit 0c507c23aa

View File

@ -11,6 +11,8 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import java.util.Arrays;
public class MyhentaicomicsRipper extends AbstractHTMLRipper {
public static boolean isTag;
@ -45,7 +47,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
return ma.group(1);
}
Pattern pat = Pattern.compile("^http://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+\\?=:]*)?$");
Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+\\?=:]*)?$");
Matcher mat = pat.matcher(url.toExternalForm());
if (mat.matches()) {
isTag = true;
@ -113,13 +115,19 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
return albumPagesList;
}
@Override
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<String>();
public List<String> getAlbumsFromPage(String url) {
List<String> pagesToRip;
// Checks if this is a comic page or a page of albums
if (doc.toString().contains("class=\"g-item g-album\"")) {
List<String> result = new ArrayList<String>();
logger.info("Running getAlbumsFromPage");
Document doc;
try {
doc = Http.url("http://myhentaicomics.com" + url).get();
} catch(IOException e){
logger.warn("Failed to log link in Jsoup");
doc = null;
e.printStackTrace();
}
// This for goes over every album on the page
for (Element elem : doc.select("li.g-album > a")) {
String link = elem.attr("href");
logger.info("Grabbing album " + link);
@ -129,8 +137,8 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Document album_doc;
try {
logger.info("grabbing " + element + " with jsoup");
boolean startsWithhttp = element.startsWith("http");
if (startsWithhttp == false) {
boolean startsWithHttp = element.startsWith("http://");
if (!startsWithHttp) {
album_doc = Http.url("http://myhentaicomics.com/" + element).get();
}
else {
@ -144,8 +152,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
for (Element el :album_doc.select("img")) {
String imageSource = el.attr("src");
// This bool is here so we don't try and download the site logo
boolean b = imageSource.startsWith("http");
if (b == false) {
if (!imageSource.startsWith("http://")) {
// We replace thumbs with resizes so we can get the full sized images
imageSource = imageSource.replace("thumbs", "resizes");
String url_string = "http://myhentaicomics.com/" + imageSource;
@ -157,13 +164,14 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
if (isTag == true) {
logger.info("Downloading from a tag or search");
try {
sleep(500);
result.add("http://myhentaicomics.com/" + imageSource);
addURLToDownload(new URL("http://myhentaicomics.com/" + imageSource), "", url_string.split("/")[6]);
}
catch(MalformedURLException e) {
logger.warn("Malformed URL");
e.printStackTrace();
}
result.add("http://myhentaicomics.com/" + imageSource);
}
}
}
@ -171,12 +179,69 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
}
return result;
}
public List<String> getListOfPages(Document doc) {
List<String> pages = new ArrayList<String>();
// Get the link from the last button
String nextPageUrl = doc.select("a.ui-icon-right").last().attr("href");
Pattern pat = Pattern.compile("\\/index\\.php\\/tag\\/[0-9]*\\/[a-zA-Z0-9_\\-\\:+]*\\?page=(\\d+)");
Matcher mat = pat.matcher(nextPageUrl);
if (mat.matches()) {
logger.debug("Getting pages from a tag");
String base_link = mat.group(0).replaceAll("\\?page=\\d+", "");
logger.debug("base_link is " + base_link);
int numOfPages = Integer.parseInt(mat.group(1));
for (int x = 1; x != numOfPages +1; x++) {
logger.debug("running loop");
String link = base_link + "?page=" + Integer.toString(x);
pages.add(link);
}
} else {
Pattern pa = Pattern.compile("\\/index\\.php\\/search\\?q=[a-zA-Z0-9_\\-\\:]*\\&page=(\\d+)");
Matcher ma = pa.matcher(nextPageUrl);
if (ma.matches()) {
logger.debug("Getting pages from a search");
String base_link = ma.group(0).replaceAll("page=\\d+", "");
logger.debug("base_link is " + base_link);
int numOfPages = Integer.parseInt(ma.group(1));
for (int x = 1; x != numOfPages +1; x++) {
logger.debug("running loop");
String link = base_link + "page=" + Integer.toString(x);
logger.debug(link);
pages.add(link);
}
}
}
return pages;
}
@Override
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<String>();
// Checks if this is a comic page or a page of albums
// If true the page is a page of albums
if (doc.toString().contains("class=\"g-item g-album\"")) {
// This if checks that there is more than 1 page
if (doc.select("a.ui-icon-right").last().attr("href") != "") {
// There is more than one page so we call getListOfPages
List<String> pagesToRip = getListOfPages(doc);
logger.debug("Pages to rip = " + pagesToRip);
for (String url : pagesToRip) {
logger.debug("Getting albums from " + url);
result = getAlbumsFromPage(url);
}
} else {
logger.debug("There is only one page on this page of albums");
// There is only 1 page so we call getAlbumsFromPage and pass it the page url
result = getAlbumsFromPage(doc.select("div.g-description > a").attr("href"));
}
return result;
}
else {
for (Element el : doc.select("img")) {
String imageSource = el.attr("src");
// This bool is here so we don't try and download the site logo
boolean b = imageSource.startsWith("http");
if (b == false) {
if (!imageSource.startsWith("http://")) {
// We replace thumbs with resizes so we can get the full sized images
imageSource = imageSource.replace("thumbs", "resizes");
result.add("http://myhentaicomics.com/" + imageSource);