package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class Hentai2readRipper extends AbstractHTMLRipper {
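
    // Last pagination URL we fetched; getNextPage compares against this to
    // detect when the site serves the same "next" link again and stop.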
String lastPage;
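
    // No ripper-specific setup is needed; the superclass constructor does the work.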
    public Hentai2readRipper(URL url) throws IOException {
        super(url);
    }
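
    // Short host name; also used as the prefix for album folder names
    // (see getAlbumTitle below).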
    @Override
    public String getHost() {
        return "hentai2read";
    }
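
    // Domain the ripper framework matches incoming URLs against to select this ripper.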
    @Override
    public String getDomain() {
        return "hentai2read.com";
    }
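
    // This ripper can queue chapter URLs discovered on a series page
    // (see getAlbumsToQueue below).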
    @Override
    public boolean hasQueueSupport() {
        return true;
    }
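
    // A bare series URL (https://hentai2read.com/COMICID/) lists chapters rather
    // than images, so it should be expanded into a queue of chapter albums.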
    @Override
    public boolean pageContainsAlbums(URL url) {
        Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
        Matcher mat = pat.matcher(url.toExternalForm());
        if (mat.matches()) {
            // Only log when the URL actually is a series page
            LOGGER.info("Page contains albums");
            return true;
        }
        return false;
    }
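
    // Collect the chapter links from a series page; each one is ripped as its own album.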
    @Override
    public List<String> getAlbumsToQueue(Document doc) {
        List<String> urlsToAddToQueue = new ArrayList<>();
        for (Element elem : doc.select(".nav-chapters > li > div.media > a")) {
            urlsToAddToQueue.add(elem.attr("href"));
        }
        return urlsToAddToQueue;
    }
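
    // GID is COMICID plus the chapter number when present, e.g.
    // https://hentai2read.com/COMICID/1/ -> "COMICID_1".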
    @Override
    public String getGID(URL url) throws MalformedURLException {
        Pattern p = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/(\\d+)?/?");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            // The chapter group is optional; avoid appending "_null" for
            // series URLs that have no chapter component.
            if (m.group(2) != null) {
                return m.group(1) + "_" + m.group(2);
            }
            return m.group(1);
        }
        throw new MalformedURLException("Expected hentai2read.com URL format: " +
                "hentai2read.com/COMICID - got " + url + " instead");
    }
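
    // For chapter URLs, jump straight to the reader's thumbnail view, which lists
    // every image on one page; series URLs are returned as-is for queueing.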
    @Override
    public Document getFirstPage() throws IOException {
        try {
            // If the page contains albums we want to load the main page
            if (pageContainsAlbums(url)) {
                return Http.url(url).get();
            }
            Document tempDoc = Http.url(url).get();
            // Get the thumbnail page so we can rip all images without loading every page in the comic
            String thumbnailLink = tempDoc.select("div.col-xs-12 > div.reader-controls > div.controls-block > button > a").attr("href");
            if (!thumbnailLink.isEmpty()) {
                return Http.url(thumbnailLink).get();
            } else {
                return Http.url(tempDoc.select("a[data-original-title=Thumbnails]").attr("href")).get();
            }
        } catch (IOException e) {
            // Preserve the underlying cause instead of swallowing it
            throw new IOException("Unable to get first page", e);
        }
    }
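
    // Album title is host_GID; falls back to the superclass default if GID parsing fails.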
    @Override
    public String getAlbumTitle(URL url) throws MalformedURLException {
        try {
            return getHost() + "_" + getGID(url);
        } catch (Exception e) {
            // Fall back to default album naming convention
            LOGGER.warn("Failed to get album title from " + url, e);
        }
        return super.getAlbumTitle(url);
    }
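
    // The thumbnail grid uses protocol-relative CDN links; prepend the scheme and
    // strip the thumbnail markers ("thumbnails/", "tmb") to get full-size image URLs.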
    @Override
    public List<String> getURLsFromPage(Document doc) {
        List<String> result = new ArrayList<>();
        for (Element el : doc.select("div.block-content > div > div.img-container > a > img.img-responsive")) {
            String imageURL = "https:" + el.attr("src");
            imageURL = imageURL.replace("hentaicdn.com", "static.hentaicdn.com");
            imageURL = imageURL.replace("thumbnails/", "");
            imageURL = imageURL.replace("tmb", "");
            result.add(imageURL);
        }
        return result;
    }
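
    // Walk the pagination links; the ripper assumes the last <a> in the pagination
    // list points to the next page, and a repeated link means we've reached the end.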
    @Override
    public Document getNextPage(Document doc) throws IOException {
        // Find next page
        Element elem = doc.select("div.bg-white > ul.pagination > li > a").last();
        if (elem == null) {
            throw new IOException("No more pages");
        }
        String nextUrl = elem.attr("href");
        // We use the global lastPage to check if we've already ripped this page,
        // and if so we quit as there are no more pages
        if (nextUrl.equals(lastPage)) {
            throw new IOException("No more pages");
        }
        lastPage = nextUrl;
        // Sleep for half a sec to avoid getting IP banned
        sleep(500);
        return Http.url(nextUrl).get();
    }
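
    // Queue each image for download, prefixing the filename with its index so
    // pages keep their reading order.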
    @Override
    public void downloadURL(URL url, int index) {
        addURLToDownload(url, getPrefix(index));
    }
}