Merge pull request #602 from cyian-1756/SankakuComplexRipper

SankakuComplexRipper can now download from different subdomains
This commit is contained in:
cyian-1756 2018-05-20 13:39:07 -04:00 committed by GitHub
commit 73276495c5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 30 additions and 3 deletions

View File

@ -43,7 +43,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
Matcher m = p.matcher(url.toExternalForm()); Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) { if (m.matches()) {
try { try {
return URLDecoder.decode(m.group(2), "UTF-8"); return URLDecoder.decode(m.group(1) + "_" + m.group(2), "UTF-8");
} catch (UnsupportedEncodingException e) { } catch (UnsupportedEncodingException e) {
throw new MalformedURLException("Cannot decode tag name '" + m.group(1) + "'"); throw new MalformedURLException("Cannot decode tag name '" + m.group(1) + "'");
} }
@ -53,6 +53,20 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
url + "instead"); url + "instead");
} }
public String getSubDomain(URL url) {
    // Extracts the subdomain portion (including the trailing dot, e.g. "idol." or
    // "chan.") from a sankakucomplex.com gallery URL so callers can rebuild
    // same-subdomain links. Returns null when the URL does not match.
    Pattern p = Pattern.compile("^https?://([a-zA-Z0-9]+\\.)?sankakucomplex\\.com/.*tags=([^&]+).*$");
    Matcher m = p.matcher(url.toExternalForm());
    if (m.matches()) {
        // Group 1 is optional: for a bare "sankakucomplex.com" URL it is null, and
        // URLDecoder.decode(null, ...) would throw NullPointerException. Guard first.
        String subDomain = m.group(1);
        if (subDomain == null) {
            return null;
        }
        try {
            return URLDecoder.decode(subDomain, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is guaranteed by the JVM spec; treat the impossible case as "no match".
            return null;
        }
    }
    return null;
}
@Override @Override
public Document getFirstPage() throws IOException { public Document getFirstPage() throws IOException {
if (albumDoc == null) { if (albumDoc == null) {
@ -71,9 +85,11 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
for (Element thumbSpan : doc.select("div.content > div > span.thumb > a")) { for (Element thumbSpan : doc.select("div.content > div > span.thumb > a")) {
String postLink = thumbSpan.attr("href"); String postLink = thumbSpan.attr("href");
try { try {
String subDomain = getSubDomain(url);
String siteURL = "https://" + subDomain + "sankakucomplex.com";
// Get the page the full sized image is on // Get the page the full sized image is on
Document subPage = Http.url("https://chan.sankakucomplex.com" + postLink).get(); Document subPage = Http.url(siteURL + postLink).get();
logger.info("Checking page " + "https://chan.sankakucomplex.com" + postLink); logger.info("Checking page " + siteURL + postLink);
imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href")); imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href"));
} catch (IOException e) { } catch (IOException e) {
logger.warn("Error while loading page " + postLink, e); logger.warn("Error while loading page " + postLink, e);

View File

@ -17,4 +17,15 @@ public class SankakuComplexRipperTest extends RippersTest {
testRipper(ripper); testRipper(ripper);
} }
*/ */
public void testgetGID() throws IOException {
    // The GID should combine the subdomain prefix with the URL-decoded tag name.
    URL albumUrl = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29");
    SankakuComplexRipper sankakuRipper = new SankakuComplexRipper(albumUrl);
    assertEquals("idol._meme_(me!me!me!)_(cosplay)", sankakuRipper.getGID(albumUrl));
}
public void testgetSubDomain() throws IOException {
    // The extracted subdomain keeps its trailing dot so it can be prepended directly.
    URL albumUrl = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29");
    SankakuComplexRipper sankakuRipper = new SankakuComplexRipper(albumUrl);
    assertEquals("idol.", sankakuRipper.getSubDomain(albumUrl));
}
} }