package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

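/**
 * Ripper for Sankaku Complex tag galleries (*.sankakucomplex.com, ?tags=...).
 */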
public class SankakuComplexRipper extends AbstractHTMLRipper {
    private Document albumDoc = null;
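    // Session cookies captured on the first page load, reused for pagination requests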
    private Map<String,String> cookies = new HashMap<>();

    public SankakuComplexRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    public String getHost() {
        return "sankakucomplex";
    }

    @Override
    public String getDomain() {
        return "sankakucomplex.com";
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
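        // The gallery ID is the URL-decoded value of the "tags" query parameter,
        // e.g. https://idol.sankakucomplex.com/?tags=something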
        Pattern p = Pattern.compile("^https?://([a-zA-Z0-9]+\\.)?sankakucomplex\\.com/.*tags=([^&]+).*$");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            try {
                return URLDecoder.decode(m.group(2), "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new MalformedURLException("Cannot decode tag name '" + m.group(2) + "'");
            }
        }
        throw new MalformedURLException("Expected sankakucomplex.com URL format: " +
                "idol.sankakucomplex.com?...&tags=something... - got " + url + " instead");
    }

    @Override
    public Document getFirstPage() throws IOException {
        if (albumDoc == null) {
            Response resp = Http.url(url).response();
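            // Save the response cookies so later pagination requests share the same session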
            cookies.putAll(resp.cookies());
            albumDoc = resp.parse();
        }
        return albumDoc;
    }

    @Override
    public List<String> getURLsFromPage(Document doc) {
        List<String> imageURLs = new ArrayList<>();
        // Each thumbnail links to a post page; the full-sized image URL is taken
        // from the "highres" link on that page
        for (Element thumbSpan : doc.select("div.content > div > span.thumb > a")) {
            String postLink = thumbSpan.attr("href");
            try {
                // Get the page the full sized image is on
                logger.info("Checking page https://chan.sankakucomplex.com" + postLink);
                Document subPage = Http.url("https://chan.sankakucomplex.com" + postLink).get();
                String imageURL = subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href");
                if (!imageURL.isEmpty()) {
                    imageURLs.add("https:" + imageURL);
                }
            } catch (IOException e) {
                logger.warn("Error while loading page " + postLink, e);
            }
        }
        return imageURLs;
    }

    @Override
    public void downloadURL(URL url, int index) {
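        // Throttle: wait 8 seconds between image downloads to avoid hammering the site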
        sleep(8000);
        addURLToDownload(url, getPrefix(index));
    }

    @Override
    public Document getNextPage(Document doc) throws IOException {
        Element pagination = doc.select("div.pagination").first();
        if (pagination != null && pagination.hasAttr("next-page-url")) {
            String nextPage = pagination.attr("abs:next-page-url");
            // Only logged in users can see past page 25
            // Trying to rip page 26 will throw a no images found error
            if (!nextPage.contains("page=26")) {
                logger.info("Getting next page: " + nextPage);
                return Http.url(nextPage).cookies(cookies).get();
            }
        }
        throw new IOException("No more pages");
    }
}