Merge pull request #562 from cyian-1756/sumuttyFixes
Added user support for smutty.com
Commit 11db3f76fa
SmuttyRipper.java
@@ -3,24 +3,19 @@ package com.rarchives.ripme.ripper.rippers;

 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;

-import com.rarchives.ripme.ripper.AlbumRipper;
 import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;

-/**
- * Appears to be broken as of 2015-02-11.
- * Generating large image from thumbnail requires replacing "/m/" with something else:
- *  -> Sometimes "/b/"
- *  -> Sometimes "/p/"
- * No way to know without loading the image page.
- */
-public class SmuttyRipper extends AlbumRipper {
+public class SmuttyRipper extends AbstractHTMLRipper {

     private static final String DOMAIN = "smutty.com",
                                 HOST   = "smutty";
@@ -29,6 +24,16 @@ public class SmuttyRipper extends AlbumRipper {
         super(url);
     }

+    @Override
+    public String getHost() {
+        return "smutty";
+    }
+
+    @Override
+    public String getDomain() {
+        return "smutty.com";
+    }
+
     @Override
     public boolean canRip(URL url) {
         return (url.getHost().endsWith(DOMAIN));
@@ -40,69 +45,57 @@ public class SmuttyRipper extends AlbumRipper {
     }

     @Override
-    public void rip() throws IOException {
-        int page = 0;
-        String url, tag = getGID(this.url);
-        boolean hasNextPage = true;
-        while (hasNextPage) {
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> results = new ArrayList<>();
+        for (Element image : doc.select("a.l > img")) {
             if (isStopped()) {
                 break;
             }
-            page++;
-            url = "http://smutty.com/h/" + tag + "/?q=%23" + tag + "&page=" + page + "&sort=date&lazy=1";
-            this.sendUpdate(STATUS.LOADING_RESOURCE, url);
-            logger.info(" Retrieving " + url);
-            Document doc;
-            try {
-                doc = Http.url(url)
-                        .ignoreContentType()
-                        .get();
-            } catch (IOException e) {
-                if (e.toString().contains("Status=404")) {
-                    logger.info("No more pages to load");
-                } else {
-                    logger.warn("Exception while loading " + url, e);
-                }
-                break;
-            }
-            for (Element image : doc.select("a.l > img")) {
-                if (isStopped()) {
-                    break;
-                }
-                String imageUrl = image.attr("src");
+            String imageUrl = image.attr("src");

-                // Construct direct link to image based on thumbnail
-                StringBuilder sb = new StringBuilder();
-                String[] fields = imageUrl.split("/");
-                for (int i = 0; i < fields.length; i++) {
-                    if (i == fields.length - 2 && fields[i].equals("m")) {
-                        fields[i] = "b";
-                    }
-                    sb.append(fields[i]);
-                    if (i < fields.length - 1) {
-                        sb.append("/");
-                    }
-                }
-                imageUrl = sb.toString();
-                addURLToDownload(new URL("http:" + imageUrl));
-            }
-            if (doc.select("#next").size() == 0) {
-                break; // No more pages
-            }
-            // Wait before loading next page
-            try {
-                Thread.sleep(1000);
-            } catch (InterruptedException e) {
-                logger.error("[!] Interrupted while waiting to load next album:", e);
-                break;
-            }
+            // Construct direct link to image based on thumbnail
+            StringBuilder sb = new StringBuilder();
+            String[] fields = imageUrl.split("/");
+            for (int i = 0; i < fields.length; i++) {
+                if (i == fields.length - 2 && fields[i].equals("m")) {
+                    fields[i] = "b";
+                }
+                sb.append(fields[i]);
+                if (i < fields.length - 1) {
+                    sb.append("/");
+                }
+            }
+            imageUrl = sb.toString();
+            results.add("http:" + imageUrl);
         }
-        waitForThreads();
+        return results;
     }

     @Override
-    public String getHost() {
-        return HOST;
+    public Document getNextPage(Document doc) throws IOException {
+        Element elem = doc.select("a.next").first();
+        if (elem == null) {
+            throw new IOException("No more pages");
+        }
+        String nextPage = elem.attr("href");
+        // Sometimes this returns an empty string
+        // This check stops that
+        if (nextPage.equals("")) {
+            throw new IOException("No more pages");
+        }
+        else {
+            return Http.url("https://smutty.com" + nextPage).get();
+        }
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
     }

     @Override
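The core of this change is the hunk above: the old one-shot rip() loop is split into AbstractHTMLRipper's getURLsFromPage/getNextPage/getFirstPage/downloadURL callbacks, while the existing thumbnail-to-full-size heuristic is kept. For reference, a minimal standalone sketch of that heuristic; the class name, helper name, and sample URL are hypothetical, and per the removed header comment "/m/" sometimes maps to "/p/" rather than "/b/", so the guess can miss:

    // Hypothetical standalone restatement of the thumbnail rewrite in getURLsFromPage
    public class ThumbRewriteSketch {
        static String toFullSize(String thumbUrl) {
            String[] fields = thumbUrl.split("/");
            // The second-to-last path segment selects the size: "m" (medium)
            // is swapped for "b" (big), matching the ripper's heuristic
            if (fields.length >= 2 && fields[fields.length - 2].equals("m")) {
                fields[fields.length - 2] = "b";
            }
            return String.join("/", fields);
        }

        public static void main(String[] args) {
            // Hypothetical protocol-relative src value scraped from an <img> tag
            String thumb = "//img.smutty.com/a1/m/example.jpg";
            System.out.println("http:" + toFullSize(thumb));
            // prints: http://img.smutty.com/a1/b/example.jpg
        }
    }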
@@ -117,6 +110,12 @@ public class SmuttyRipper extends AlbumRipper {
         if (m.matches()) {
             return m.group(1).replace("%23", "");
         }
+
+        p = Pattern.compile("^https?://smutty.com/user/([a-zA-Z0-9\\-_]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
         throw new MalformedURLException("Expected tag in URL (smutty.com/h/tag and not " + url);
     }
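The second regex in getGID is what adds user support: a smutty.com/user/NAME URL now yields NAME as the album GID. A minimal check of just that branch, reusing the regex and the test URL from this PR (the wrapper class name is illustrative):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative check of the new user-URL branch in getGID
    public class UserUrlPatternSketch {
        public static void main(String[] args) {
            Pattern p = Pattern.compile("^https?://smutty.com/user/([a-zA-Z0-9\\-_]+)/?$");
            Matcher m = p.matcher("https://smutty.com/user/QUIGON/");
            if (m.matches()) {
                System.out.println(m.group(1)); // prints: QUIGON
            }
        }
    }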
SmuttyRipperTest.java
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.SmuttyRipper;
+
+public class SmuttyRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        SmuttyRipper ripper = new SmuttyRipper(new URL("https://smutty.com/user/QUIGON/"));
+        testRipper(ripper);
+    }
+}
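Assuming the project's usual Maven setup (an assumption, not shown in this diff), the new test can be run on its own with "mvn test -Dtest=SmuttyRipperTest".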