diff --git a/pom.xml b/pom.xml
index 8ce4b5ae..0796e60f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,7 +4,7 @@
com.rarchives.ripme
ripme
jar
- 1.7.34
+ 1.7.44
ripme
http://rip.rarchives.com
diff --git a/ripme.json b/ripme.json
index 3a07f1b7..757058ae 100644
--- a/ripme.json
+++ b/ripme.json
@@ -1,6 +1,16 @@
{
- "latestVersion": "1.7.34",
+ "latestVersion": "1.7.44",
"changeList": [
+ "1.7.44: Fixed instagram ripper regex",
+ "1.7.43: Fixed queryId regex in instagram ripper",
+ "1.7.42: Added user support to SmuttyRipper; Removed vine ripper; Fixed NudeGalsRipper; addURLToDownload improvements; Fixed Instagram ripper",
+ "1.7.41: Added support for spyingwithlana.com; Added ManganeloRipper; Added support for dynasty-scans.com",
+ "1.7.40: Added hypnohub.net ripper; Fixed rule34.xxx ripper; Tsumino Ripper now add .png to filenames",
+ "1.7.39: Added rule34.xxx ripper; Added Gfycatporntube.com ripper; Fixed AbstractRipper subdir bug; Added AbstractRipper unit tests",
+ "1.7.38: Added http and socks proxy support; Extended some unit tests to include getGid; Added HitomiRipper; hentaifoundry ripper now can rip all images from accounts",
+ "1.7.37: Minor code clean up; Added socks proxy support; Added support for 8muses.download; Hentaifoundry no longer errors when there are no more pages; Fix bug that causes tumblr to replace https with httpss when downloading resized images",
+ "1.7.36: Fixed Instagram ripper; Fixed hentai2read ripper test; Fixed tnbtu.com ripper",
+ "1.7.35: Fixed instagram ripper; hentai2read ripper now properly names folders",
"1.7.34: Added Blackbrickroadofoz Ripper; Fixed webtoons regex",
"1.7.33: Instagram ripper no longer errors out when downloading from more than 1 page",
"1.7.32: Instagram ripper update to use new enpoints",
diff --git a/src/main/java/com/rarchives/ripme/App.java b/src/main/java/com/rarchives/ripme/App.java
index 67c44ab1..6f650d56 100644
--- a/src/main/java/com/rarchives/ripme/App.java
+++ b/src/main/java/com/rarchives/ripme/App.java
@@ -27,6 +27,7 @@ import com.rarchives.ripme.ui.History;
import com.rarchives.ripme.ui.HistoryEntry;
import com.rarchives.ripme.ui.MainWindow;
import com.rarchives.ripme.ui.UpdateUtils;
+import com.rarchives.ripme.utils.Proxy;
import com.rarchives.ripme.utils.RipUtils;
import com.rarchives.ripme.utils.Utils;
@@ -47,6 +48,12 @@ public class App {
System.exit(0);
}
+ if (Utils.getConfigString("proxy.http", null) != null) {
+ Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", null));
+ } else if (Utils.getConfigString("proxy.socks", null) != null) {
+ Proxy.setSocks(Utils.getConfigString("proxy.socks", null));
+ }
+
if (GraphicsEnvironment.isHeadless() || args.length > 0) {
handleArguments(args);
} else {
@@ -67,7 +74,7 @@ public class App {
/**
* Creates an abstract ripper and instructs it to rip.
* @param url URL to be ripped
- * @throws Exception
+ * @throws Exception
*/
private static void rip(URL url) throws Exception {
AbstractRipper ripper = AbstractRipper.getRipper(url);
@@ -95,6 +102,16 @@ public class App {
Utils.setConfigBoolean("file.overwrite", true);
}
+ if (cl.hasOption('s')) {
+ String sservfull = cl.getOptionValue('s').trim();
+ Proxy.setSocks(sservfull);
+ }
+
+ if (cl.hasOption('p')) {
+ String proxyserverfull = cl.getOptionValue('p').trim();
+ Proxy.setHTTPProxy(proxyserverfull);
+ }
+
if (cl.hasOption('t')) {
Utils.setConfigInteger("threads.size", Integer.parseInt(cl.getOptionValue('t')));
}
@@ -195,6 +212,7 @@ public class App {
String url = cl.getOptionValue('u').trim();
ripURL(url, cl.hasOption("n"));
}
+
}
/**
@@ -242,6 +260,8 @@ public class App {
opts.addOption("n", "no-prop-file", false, "Do not create properties file.");
opts.addOption("f", "urls-file", true, "Rip URLs from a file.");
opts.addOption("v", "version", false, "Show current version");
+ opts.addOption("s", "socks-server", true, "Use socks server ([user:password]@host[:port])");
+ opts.addOption("p", "proxy-server", true, "Use HTTP Proxy server ([user:password]@host[:port])");
return opts;
}
@@ -260,7 +280,7 @@ public class App {
return null;
}
}
-
+
/**
* Loads history from history file into memory.
*/
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
index 6068ed18..20889495 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
@@ -192,7 +192,8 @@ public abstract class AbstractRipper
* True if downloaded successfully
* False if failed to download
*/
- protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies);
+ protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies,
+ Boolean getFileExtFromMIME);
/**
* Queues image to be downloaded and saved.
@@ -212,7 +213,7 @@ public abstract class AbstractRipper
* True if downloaded successfully
* False if failed to download
*/
- protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName) {
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension, Boolean getFileExtFromMIME) {
// Don't re-add the url if it was downloaded in a previous rip
if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
if (hasDownloadedURL(url.toExternalForm())) {
@@ -228,21 +229,7 @@ public abstract class AbstractRipper
return false;
}
logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
- String saveAs;
- if (fileName != null) {
- saveAs = fileName;
- // Get the extension of the file
- String extension = url.toExternalForm().substring(url.toExternalForm().lastIndexOf(".") + 1);
- saveAs = saveAs + "." + extension;
- } else {
- saveAs = url.toExternalForm();
- saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
- }
-
- if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
- if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
- if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
- if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
+ String saveAs = getFileName(url, fileName, extension);
File saveFileAs;
try {
if (!subdirectory.equals("")) {
@@ -271,7 +258,15 @@ public abstract class AbstractRipper
logger.debug("Unable to write URL history file");
}
}
- return addURLToDownload(url, saveFileAs, referrer, cookies);
+ return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME);
+ }
+
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension) {
+ return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, extension, false);
+ }
+
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName) {
+ return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, null);
}
/**
@@ -306,6 +301,35 @@ public abstract class AbstractRipper
return addURLToDownload(url, prefix, "");
}
+ public static String getFileName(URL url, String fileName, String extension) {
+ String saveAs;
+ if (fileName != null) {
+ saveAs = fileName;
+ } else {
+ saveAs = url.toExternalForm();
+ saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+ }
+ if (extension == null && fileName != null) {
+ // Caller supplied a bare file name, so derive the extension from the URL.
+ // NB: String.split takes a regex — the dot must be escaped or it matches every char.
+ String[] lastBitOfURL = url.toExternalForm().split("/");
+ String[] lastBit = lastBitOfURL[lastBitOfURL.length - 1].split("\\.");
+ if (lastBit.length > 1) {
+ extension = lastBit[lastBit.length - 1];
+ }
+ }
+
+ if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
+ if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
+ if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
+ if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
+ if (extension != null) {
+ // Append exactly once, after query-string junk has been trimmed.
+ saveAs = saveAs + "." + extension;
+ }
+ return saveAs;
+ }
+
/**
* Waits for downloading threads to complete.
diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
index 1726343a..f700f012 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
@@ -50,7 +50,7 @@ public abstract class AlbumRipper extends AbstractRipper {
/**
* Queues multiple URLs of single images to download from a single Album URL
*/
- public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies) {
+ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) {
// Only download one file if this is a test.
if (super.isThisATest() &&
(itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) {
@@ -82,7 +82,7 @@ public abstract class AlbumRipper extends AbstractRipper {
}
else {
itemsPending.put(url, saveAs);
- DownloadFileThread dft = new DownloadFileThread(url, saveAs, this);
+ DownloadFileThread dft = new DownloadFileThread(url, saveAs, this, getFileExtFromMIME);
if (referrer != null) {
dft.setReferrer(referrer);
}
@@ -96,7 +96,7 @@ public abstract class AlbumRipper extends AbstractRipper {
@Override
public boolean addURLToDownload(URL url, File saveAs) {
- return addURLToDownload(url, saveAs, null, null);
+ return addURLToDownload(url, saveAs, null, null, false);
}
/**
diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
index c62d58a6..42dedffe 100644
--- a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
+++ b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
@@ -8,6 +8,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
+import java.net.URLConnection;
import java.util.HashMap;
import java.util.Map;
@@ -36,10 +37,11 @@ class DownloadFileThread extends Thread {
private String prettySaveAs;
private AbstractRipper observer;
private int retries;
+ private Boolean getFileExtFromMIME;
private final int TIMEOUT;
- public DownloadFileThread(URL url, File saveAs, AbstractRipper observer) {
+ public DownloadFileThread(URL url, File saveAs, AbstractRipper observer, Boolean getFileExtFromMIME) {
super();
this.url = url;
this.saveAs = saveAs;
@@ -47,6 +49,7 @@ class DownloadFileThread extends Thread {
this.observer = observer;
this.retries = Utils.getConfigInteger("download.retries", 1);
this.TIMEOUT = Utils.getConfigInteger("download.timeout", 60000);
+ this.getFileExtFromMIME = getFileExtFromMIME;
}
public void setReferrer(String referrer) {
@@ -143,9 +146,15 @@ class DownloadFileThread extends Thread {
observer.downloadErrored(url, "Imgur image is 404: " + url.toExternalForm());
return;
}
-
// Save file
bis = new BufferedInputStream(huc.getInputStream());
+
+ // Check if we should get the file ext from the MIME type
+ if (getFileExtFromMIME) {
+ String fileExt = URLConnection.guessContentTypeFromStream(bis).replaceAll("image/", "");
+ saveAs = new File(saveAs.toString() + "." + fileExt);
+ }
+
fos = new FileOutputStream(saveAs);
IOUtils.copy(bis, fos);
break; // Download successful: break out of infinite loop
diff --git a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
index 13008cd9..29200d5a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
@@ -10,6 +10,7 @@ import java.util.Map;
import com.rarchives.ripme.ui.RipStatusMessage;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Utils;
+
public abstract class VideoRipper extends AbstractRipper {
@@ -70,7 +71,7 @@ public abstract class VideoRipper extends AbstractRipper {
}
@Override
- public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies) {
+ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) {
return addURLToDownload(url, saveAs);
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java
index 76340cf9..cb5d4b14 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java
@@ -55,14 +55,8 @@ public class BlackbrickroadofozRipper extends AbstractHTMLRipper {
throw new IOException("No more pages");
}
String nextPage = elem.attr("href");
- // Some times this returns a empty string
- // This for stops that
- if (nextPage == "") {
- throw new IOException("No more pages");
- }
- else {
- return Http.url(nextPage).get();
- }
+ return Http.url(nextPage).get();
+
}
@Override
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
index fb29171b..7ccf558c 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
@@ -59,7 +59,7 @@ public class CfakeRipper extends AbstractHTMLRipper {
String nextPage = elem.attr("href");
// Some times this returns a empty string
// This for stops that
- if (nextPage == "") {
+ if (nextPage.equals("")) {
return null;
}
else {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java
new file mode 100644
index 00000000..37d3ad93
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java
@@ -0,0 +1,84 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONArray;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class DynastyscansRipper extends AbstractHTMLRipper {
+
+ public DynastyscansRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "dynasty-scans";
+ }
+
+ @Override
+ public String getDomain() {
+ return "dynasty-scans.com";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://dynasty-scans.com/chapters/([\\S]+)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected dynasty-scans URL format: " +
+ "dynasty-scans.com/chapters/ID - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public Document getNextPage(Document doc) throws IOException {
+ Element elem = doc.select("a[id=next_link]").first();
+ if (elem == null || elem.attr("href").equals("#")) {
+ throw new IOException("No more pages");
+ }
+ return Http.url("https://dynasty-scans.com" + elem.attr("href")).get();
+
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ String jsonText = null;
+ for (Element script : doc.select("script")) {
+ if (script.data().contains("var pages")) {
+ jsonText = script.data().replaceAll("var pages = ", "");
+ jsonText = jsonText.replaceAll("//", "");
+ }
+ }
+ JSONArray imageArray = new JSONArray(jsonText);
+ for (int i = 0; i < imageArray.length(); i++) {
+ result.add("https://dynasty-scans.com" + imageArray.getJSONObject(i).getString("image"));
+ }
+
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
index 77ca9102..ca9c24e3 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
index e7019178..737b8092 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
@@ -86,7 +86,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
throw new IOException("No more pages");
}
nextUrl = elem.attr("href");
- if (nextUrl == "") {
+ if (nextUrl.equals("")) {
throw new IOException("No more pages");
}
return Http.url("eroshae.com" + nextUrl).get();
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
index 7e532943..93aedba2 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
@@ -332,7 +332,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
String[] fields = u.split("/");
String prefix = getPrefix(index) + fields[fields.length - 3];
File saveAs = new File(getWorkingDir() + File.separator + prefix + ".jpg");
- addURLToDownload(url, saveAs, "", null);
+ addURLToDownload(url, saveAs, "", null, false);
}
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
index 973796cf..68aa950a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
@@ -13,7 +13,6 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.rarchives.ripme.utils.Utils;
-import org.jsoup.Connection.Method;
import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
@@ -23,7 +22,6 @@ import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.DownloadThreadPool;
-import com.rarchives.ripme.utils.Base64;
import com.rarchives.ripme.utils.Http;
public class FuraffinityRipper extends AbstractHTMLRipper {
@@ -162,10 +160,6 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
if (!subdirectory.equals("")) {
subdirectory = File.separator + subdirectory;
}
- int o = url.toString().lastIndexOf('/')-1;
- String test = url.toString().substring(url.toString().lastIndexOf('/',o)+1);
- test = test.replace("/",""); // This is probably not the best way to do this.
- test = test.replace("\\",""); // CLOSE ENOUGH!
saveFileAs = new File(
workingDir.getCanonicalPath()
+ subdirectory
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java
new file mode 100644
index 00000000..504b89d6
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java
@@ -0,0 +1,61 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class GfycatporntubeRipper extends AbstractHTMLRipper {
+
+ public GfycatporntubeRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "gfycatporntube";
+ }
+
+ @Override
+ public String getDomain() {
+ return "gfycatporntube.com";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://gfycatporntube.com/([a-zA-Z0-9_-]*)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected gfycatporntube URL format: " +
+ "gfycatporntube.com/NAME - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ result.add(doc.select("source[id=mp4Source]").attr("src"));
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
index 6e76bbc6..405d0563 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
@@ -33,7 +33,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
@Override
public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("https://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
+ Pattern p = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/(\\d)?/?");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
@@ -63,9 +63,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
- Document doc = getFirstPage();
- String title = doc.select("span[itemprop=title]").text();
- return getHost() + "_" + title;
+ return getHost() + "_" + getGID(url);
} catch (Exception e) {
// Fall back to default album naming convention
logger.warn("Failed to get album title from " + url, e);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
index 561c4249..8d953de1 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
@@ -10,6 +10,7 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.jsoup.Connection.Method;
import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
@@ -49,19 +50,61 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
@Override
public Document getFirstPage() throws IOException {
- Response resp = Http.url("http://www.hentai-foundry.com/").response();
- cookies = resp.cookies();
+ Response resp;
+ Document doc;
+
resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
- .referrer("http://www.hentai-foundry.com/")
- .cookies(cookies)
- .response();
+ .referrer("http://www.hentai-foundry.com/")
+ .cookies(cookies)
+ .response();
// The only cookie that seems to matter in getting around the age wall is the phpsession cookie
cookies.putAll(resp.cookies());
- sleep(500);
+
+ doc = resp.parse();
+ String csrf_token = doc.select("input[name=YII_CSRF_TOKEN]")
+ .first().attr("value");
+ if (csrf_token != null) {
+ Map data = new HashMap<>();
+ data.put("YII_CSRF_TOKEN" , csrf_token);
+ data.put("rating_nudity" , "3");
+ data.put("rating_violence" , "3");
+ data.put("rating_profanity", "3");
+ data.put("rating_racism" , "3");
+ data.put("rating_sex" , "3");
+ data.put("rating_spoilers" , "3");
+ data.put("rating_yaoi" , "1");
+ data.put("rating_yuri" , "1");
+ data.put("rating_teen" , "1");
+ data.put("rating_guro" , "1");
+ data.put("rating_furry" , "1");
+ data.put("rating_beast" , "1");
+ data.put("rating_male" , "1");
+ data.put("rating_female" , "1");
+ data.put("rating_futa" , "1");
+ data.put("rating_other" , "1");
+ data.put("rating_scat" , "1");
+ data.put("rating_incest" , "1");
+ data.put("rating_rape" , "1");
+ data.put("filter_media" , "A");
+ data.put("filter_order" , "date_new");
+ data.put("filter_type" , "0");
+
+ resp = Http.url("http://www.hentai-foundry.com/site/filters")
+ .referrer("http://www.hentai-foundry.com/")
+ .cookies(cookies)
+ .data(data)
+ .method(Method.POST)
+ .response();
+ cookies.putAll(resp.cookies());
+ }
+ else {
+ logger.info("unable to find csrf_token and set filter");
+ }
+
resp = Http.url(url)
- .referrer("http://www.hentai-foundry.com/")
- .cookies(cookies)
- .response();
+ .referrer("http://www.hentai-foundry.com/")
+ .cookies(cookies)
+ .response();
cookies.putAll(resp.cookies());
return resp.parse();
}
@@ -74,12 +117,16 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
}
Elements els = doc.select("li.next > a");
Element first = els.first();
- String nextURL = first.attr("href");
- nextURL = "http://www.hentai-foundry.com" + nextURL;
- return Http.url(nextURL)
- .referrer(url)
- .cookies(cookies)
- .get();
+ try {
+ String nextURL = first.attr("href");
+ nextURL = "http://www.hentai-foundry.com" + nextURL;
+ return Http.url(nextURL)
+ .referrer(url)
+ .cookies(cookies)
+ .get();
+ } catch (NullPointerException e) {
+ throw new IOException("No more pages");
+ }
}
@Override
@@ -97,13 +144,6 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
}
Document imagePage;
try {
- Response resp = Http.url("http://www.hentai-foundry.com/").response();
- cookies = resp.cookies();
- resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
- .referrer("http://www.hentai-foundry.com/")
- .cookies(cookies)
- .response();
- cookies.putAll(resp.cookies());
logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
new file mode 100644
index 00000000..3e92cc61
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
@@ -0,0 +1,73 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class HitomiRipper extends AbstractHTMLRipper {
+
+ String galleryId = "";
+
+ public HitomiRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "hitomi";
+ }
+
+ @Override
+ public String getDomain() {
+ return "hitomi.la";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https://hitomi.la/galleries/([\\d]+).html");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ galleryId = m.group(1);
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected hitomi URL format: " +
+ "https://hitomi.la/galleries/ID.html - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // if we go to /GALLERYID.js we get a nice json array of all images in the gallery
+ return Http.url(new URL(url.toExternalForm().replace(".html", ".js"))).ignoreContentType().get();
+ }
+
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ String json = doc.text().replaceAll("var galleryinfo =", "");
+ logger.info(json);
+ JSONArray json_data = new JSONArray(json);
+ for (int i = 0; i < json_data.length(); i++) {
+ result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));
+ }
+
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
new file mode 100644
index 00000000..1eabefb9
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
@@ -0,0 +1,91 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import javax.print.Doc;
+
+public class HypnohubRipper extends AbstractHTMLRipper {
+
+ public HypnohubRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "hypnohub";
+ }
+
+ @Override
+ public String getDomain() {
+ return "hypnohub.net";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/([\\S]+)/?$");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1) + "_" + m.group(2);
+ }
+ throw new MalformedURLException("Expected hypnohub URL format: " +
+ "hypnohub.net/pool/show/ID - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ private String ripPost(String url) throws IOException {
+ logger.info(url);
+ Document doc = Http.url(url).get();
+ return "https:" + doc.select("img.image").attr("src");
+
+ }
+
+ private String ripPost(Document doc) {
+ logger.info(url);
+ return "https:" + doc.select("img.image").attr("src");
+
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ if (url.toExternalForm().contains("/pool")) {
+ for (Element el : doc.select("ul[id=post-list-posts] > li > div > a.thumb")) {
+ try {
+ result.add(ripPost("https://hypnohub.net" + el.attr("href")));
+ } catch (IOException e) {
+ return result;
+ }
+ }
+ } else if (url.toExternalForm().contains("/post")) {
+ result.add(ripPost(doc));
+ }
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
index 12842aa8..efc4cb40 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
@@ -328,7 +328,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
}
private String getIGGis(String variables) {
- String stringToMD5 = rhx_gis + ":" + csrftoken + ":" + variables;
+ String stringToMD5 = rhx_gis + ":" + variables;
logger.debug("String to md5 is \"" + stringToMD5 + "\"");
try {
byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
@@ -374,7 +374,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
try {
// Sleep for a while to avoid a ban
sleep(2500);
- String vars = "{\"id\":\"" + userID + "\",\"first\":100,\"after\":\"" + nextPageID + "\"}";
+ String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
String ig_gis = getIGGis(vars);
logger.info(ig_gis);
toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars
@@ -435,6 +435,12 @@ public class InstagramRipper extends AbstractHTMLRipper {
if (m.find()) {
return m.group(1);
}
+ jsP = Pattern.compile("n.pagination:n},queryId:.([a-zA-Z0-9]+).");
+ m = jsP.matcher(sb.toString());
+ if (m.find()) {
+ return m.group(1);
+ }
+
} else {
Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
Matcher m = jsP.matcher(sb.toString());
@@ -442,7 +448,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
return m.group(1);
}
}
- logger.info("Could not find query_hash on " + jsFileURL);
+ logger.error("Could not find query_hash on " + jsFileURL);
return null;
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
index c98dac0c..376d1292 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
@@ -50,7 +50,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
// This is here for pages with mp4s instead of images
String video_image = "";
video_image = page.select("div > video > source").attr("src");
- if (video_image != "") {
+ if (!video_image.equals("")) {
urls.add(video_image);
}
return urls;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java
new file mode 100644
index 00000000..8f8f8e68
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java
@@ -0,0 +1,116 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import javax.print.Doc;
+
+public class ManganeloRipper extends AbstractHTMLRipper {
+
+ public ManganeloRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "manganelo";
+ }
+
+ @Override
+ public String getDomain() {
+ return "manganelo.com";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://manganelo.com/manga/([\\S]+)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+
+ p = Pattern.compile("https?://manganelo.com/chapter/([\\S]+)/([\\S]+)/?$");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected manganelo URL format: " +
+ "/manganelo.com/manga/ID - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public Document getNextPage(Document doc) throws IOException {
+ Element elem = doc.select("div.btn-navigation-chap > a.back").first();
+ if (elem == null) {
+ throw new IOException("No more pages");
+ } else {
+ return Http.url(elem.attr("href")).get();
+ }
+ }
+
+ private List<String> getURLsFromChap(String url) {
+ logger.debug("Getting urls from " + url);
+ List<String> result = new ArrayList<>();
+ try {
+ Document doc = Http.url(url).get();
+ for (Element el : doc.select("img.img_content")) {
+ result.add(el.attr("src"));
+ }
+ return result;
+ } catch (IOException e) {
+ return result;
+ }
+
+ }
+
+ private List<String> getURLsFromChap(Document doc) {
+ logger.debug("Getting urls from " + url);
+ List<String> result = new ArrayList<>();
+ for (Element el : doc.select("img.img_content")) {
+ result.add(el.attr("src"));
+ }
+ return result;
+ }
+
+ @Override
+ public List<String> getURLsFromPage(Document doc) {
+ List<String> result = new ArrayList<>();
+ List<String> urlsToGrab = new ArrayList<>();
+ if (url.toExternalForm().contains("/manga/")) {
+ for (Element el : doc.select("div.chapter-list > div.row > span > a")) {
+ urlsToGrab.add(el.attr("href"));
+ }
+ Collections.reverse(urlsToGrab);
+
+ for (String url : urlsToGrab) {
+ result.addAll(getURLsFromChap(url));
+ }
+ } else if (url.toExternalForm().contains("/chapter/")) {
+ result.addAll(getURLsFromChap(doc));
+ }
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
index 21942a47..5b60c4f2 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
@@ -73,7 +73,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
if (m.matches()) {
nextUrl = "http://myhentaicomics.com" + m.group(0);
}
- if (nextUrl == "") {
+ if (nextUrl.equals("")) {
throw new IOException("No more pages");
}
// Sleep for half a sec to avoid getting IP banned
@@ -100,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Element elem = nextAlbumPage.select("a.ui-icon-right").first();
String nextPage = elem.attr("href");
pageNumber = pageNumber + 1;
- if (nextPage == "") {
+ if (nextPage.equals("")) {
logger.info("Got " + pageNumber + " pages");
break;
}
@@ -220,7 +220,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
// If true the page is a page of albums
if (doc.toString().contains("class=\"g-item g-album\"")) {
// This if checks that there is more than 1 page
- if (doc.select("a.ui-icon-right").last().attr("href") != "") {
+ if (!doc.select("a.ui-icon-right").last().attr("href").equals("")) {
// There is more than one page so we call getListOfPages
List<String> pagesToRip = getListOfPages(doc);
logger.debug("Pages to rip = " + pagesToRip);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java
index d3cb0ab1..3300da50 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java
@@ -33,23 +33,6 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
return "nude-gals.com";
}
- public String getAlbumTitle(URL url) throws MalformedURLException {
- try {
- Document doc = getFirstPage();
- Elements elems = doc.select("#left_col > #grid_title > .right");
-
- String girl = elems.get(3).text();
- String magazine = elems.get(2).text();
- String title = elems.get(0).text();
-
- return getHost() + "_" + girl + "-" + magazine + "-" + title;
- } catch (Exception e) {
- // Fall back to default album naming convention
- logger.warn("Failed to get album title from " + url, e);
- }
- return super.getAlbumTitle(url);
- }
-
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p;
@@ -79,9 +62,9 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
public List<String> getURLsFromPage(Document doc) {
List<String> imageURLs = new ArrayList<>();
- Elements thumbs = doc.select("#grid_container .grid > .grid_box");
+ Elements thumbs = doc.select("img.thumbnail");
for (Element thumb : thumbs) {
- String link = thumb.select("a").get(1).attr("href");
+ String link = thumb.attr("src").replaceAll("thumbs/th_", "");
String imgSrc = "http://nude-gals.com/" + link;
imageURLs.add(imgSrc);
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java
new file mode 100644
index 00000000..b7e0f7b0
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java
@@ -0,0 +1,94 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class Rule34Ripper extends AbstractHTMLRipper {
+
+ public Rule34Ripper(URL url) throws IOException {
+ super(url);
+ }
+
+ private String apiUrl;
+ private int pageNumber = 0;
+
+ @Override
+ public String getHost() {
+ return "rule34";
+ }
+
+ @Override
+ public String getDomain() {
+ return "rule34.xxx";
+ }
+
+ @Override
+ public boolean canRip(URL url){
+ Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected rule34.xxx URL format: " +
+ "rule34.xxx/index.php?page=post&s=list&tags=TAG - got " + url + " instead");
+ }
+
+ public URL getAPIUrl() throws MalformedURLException {
+ URL urlToReturn = new URL("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url));
+ return urlToReturn;
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ apiUrl = getAPIUrl().toExternalForm();
+ // "url" is an instance field of the superclass
+ return Http.url(getAPIUrl()).get();
+ }
+
+ @Override
+ public Document getNextPage(Document doc) throws IOException {
+ if (doc.html().contains("Search error: API limited due to abuse")) {
+ throw new IOException("No more pages");
+ }
+ pageNumber += 1;
+ String nextPage = apiUrl + "&pid=" + pageNumber;
+ return Http.url(nextPage).get();
+ }
+
+ @Override
+ public List<String> getURLsFromPage(Document doc) {
+ List<String> result = new ArrayList<>();
+ for (Element el : doc.select("posts > post")) {
+ String imageSource = el.select("post").attr("file_url");
+ result.add(imageSource);
+
+ }
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
index d30e9b63..9de3d2ae 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
@@ -57,7 +57,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
String nextPage = elem.parent().attr("href");
// Some times this returns a empty string
// This for stops that
- if (nextPage == "") {
+ if (nextPage.equals("")) {
return null;
}
else {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java
index 51992ec4..b61f2fef 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java
@@ -3,24 +3,19 @@ package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
-import com.rarchives.ripme.ripper.AlbumRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
-/**
- * Appears to be broken as of 2015-02-11.
- * Generating large image from thumbnail requires replacing "/m/" with something else:
- * -> Sometimes "/b/"
- * -> Sometimes "/p/"
- * No way to know without loading the image page.
- */
-public class SmuttyRipper extends AlbumRipper {
+
+public class SmuttyRipper extends AbstractHTMLRipper {
private static final String DOMAIN = "smutty.com",
HOST = "smutty";
@@ -29,6 +24,16 @@ public class SmuttyRipper extends AlbumRipper {
super(url);
}
+ @Override
+ public String getHost() {
+ return "smutty";
+ }
+
+ @Override
+ public String getDomain() {
+ return "smutty.com";
+ }
+
@Override
public boolean canRip(URL url) {
return (url.getHost().endsWith(DOMAIN));
@@ -40,69 +45,57 @@ public class SmuttyRipper extends AlbumRipper {
}
@Override
- public void rip() throws IOException {
- int page = 0;
- String url, tag = getGID(this.url);
- boolean hasNextPage = true;
- while (hasNextPage) {
+ public List<String> getURLsFromPage(Document doc) {
+ List<String> results = new ArrayList<>();
+ for (Element image : doc.select("a.l > img")) {
if (isStopped()) {
break;
}
- page++;
- url = "http://smutty.com/h/" + tag + "/?q=%23" + tag + "&page=" + page + "&sort=date&lazy=1";
- this.sendUpdate(STATUS.LOADING_RESOURCE, url);
- logger.info(" Retrieving " + url);
- Document doc;
- try {
- doc = Http.url(url)
- .ignoreContentType()
- .get();
- } catch (IOException e) {
- if (e.toString().contains("Status=404")) {
- logger.info("No more pages to load");
- } else {
- logger.warn("Exception while loading " + url, e);
- }
- break;
- }
- for (Element image : doc.select("a.l > img")) {
- if (isStopped()) {
- break;
- }
- String imageUrl = image.attr("src");
+ String imageUrl = image.attr("src");
- // Construct direct link to image based on thumbnail
- StringBuilder sb = new StringBuilder();
- String[] fields = imageUrl.split("/");
- for (int i = 0; i < fields.length; i++) {
- if (i == fields.length - 2 && fields[i].equals("m")) {
- fields[i] = "b";
- }
- sb.append(fields[i]);
- if (i < fields.length - 1) {
- sb.append("/");
- }
+ // Construct direct link to image based on thumbnail
+ StringBuilder sb = new StringBuilder();
+ String[] fields = imageUrl.split("/");
+ for (int i = 0; i < fields.length; i++) {
+ if (i == fields.length - 2 && fields[i].equals("m")) {
+ fields[i] = "b";
+ }
+ sb.append(fields[i]);
+ if (i < fields.length - 1) {
+ sb.append("/");
}
- imageUrl = sb.toString();
- addURLToDownload(new URL("http:" + imageUrl));
- }
- if (doc.select("#next").size() == 0) {
- break; // No more pages
- }
- // Wait before loading next page
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- logger.error("[!] Interrupted while waiting to load next album:", e);
- break;
}
+ imageUrl = sb.toString();
+ results.add("http:" + imageUrl);
}
- waitForThreads();
+ return results;
}
@Override
- public String getHost() {
- return HOST;
+ public Document getNextPage(Document doc) throws IOException {
+ Element elem = doc.select("a.next").first();
+ if (elem == null) {
+ throw new IOException("No more pages");
+ }
+ String nextPage = elem.attr("href");
+ // Some times this returns a empty string
+ // This for stops that
+ if (nextPage.equals("")) {
+ throw new IOException("No more pages");
+ }
+ else {
+ return Http.url("https://smutty.com" + nextPage).get();
+ }
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
}
@Override
@@ -117,6 +110,12 @@ public class SmuttyRipper extends AlbumRipper {
if (m.matches()) {
return m.group(1).replace("%23", "");
}
+
+ p = Pattern.compile("^https?://smutty.com/user/([a-zA-Z0-9\\-_]+)/?$");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
throw new MalformedURLException("Expected tag in URL (smutty.com/h/tag and not " + url);
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
index 9ca91e45..7d35fc1d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
@@ -17,7 +17,6 @@ import org.json.JSONObject;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -35,13 +34,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
try {
// This sessionId will expire and need to be replaced
cookies.put("ASP.NET_SessionId","c4rbzccf0dvy3e0cloolmlkq");
- logger.info(cookies);
Document doc = Jsoup.connect(postURL).data("q", getAlbumID()).userAgent(USER_AGENT).cookies(cookies).referrer("http://www.tsumino.com/Read/View/" + getAlbumID()).post();
String jsonInfo = doc.html().replaceAll("<html>","").replaceAll("<head></head>", "").replaceAll("<body>", "").replaceAll("</body>", "")
.replaceAll("</html>", "").replaceAll("\n", "");
- logger.info(jsonInfo);
JSONObject json = new JSONObject(jsonInfo);
- logger.info(json.getJSONArray("reader_page_urls"));
return json.getJSONArray("reader_page_urls");
} catch (IOException e) {
logger.info(e);
@@ -85,7 +81,6 @@ public class TsuminoRipper extends AbstractHTMLRipper {
public Document getFirstPage() throws IOException {
Connection.Response resp = Http.url(url).response();
cookies.putAll(resp.cookies());
- logger.info(resp.parse());
return resp.parse();
}
@@ -103,6 +98,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
@Override
public void downloadURL(URL url, int index) {
sleep(1000);
- addURLToDownload(url, getPrefix(index));
+ /*
+ There is no way to tell if an image returned from tsumino.com is a png to jpg. The content-type header is always
+ "image/jpeg" even when the image is a png. The file ext is not included in the url.
+ */
+ addURLToDownload(url, getPrefix(index), "", null, null, null, null, true);
}
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
index dc57c48f..89884854 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
@@ -44,7 +44,7 @@ public class TumblrRipper extends AlbumRipper {
private static final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS
/**
- * Gets the API key.
+ * Gets the API key.
* Chooses between default/included keys & user specified ones (from the config file).
* @return Tumblr API key
*/
@@ -57,7 +57,7 @@ public class TumblrRipper extends AlbumRipper {
logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
return userDefinedAPIKey;
}
-
+
}
public TumblrRipper(URL url) throws IOException {
@@ -71,12 +71,12 @@ public class TumblrRipper extends AlbumRipper {
public boolean canRip(URL url) {
return url.getHost().endsWith(DOMAIN);
}
-
+
/**
* Sanitizes URL.
* @param url URL to be sanitized.
* @return Sanitized URL
- * @throws MalformedURLException
+ * @throws MalformedURLException
*/
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
@@ -230,7 +230,7 @@ public class TumblrRipper extends AlbumRipper {
urlString = urlString.replaceAll("_\\d+\\.", "_raw.");
fileURL = new URL(urlString);
} else {
- fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http", "https"));
+ fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http:", "https:"));
}
m = p.matcher(fileURL.toString());
if (m.matches()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
index ab34620c..abdb0320 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
@@ -4,14 +4,12 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
-import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VineRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VineRipper.java
deleted file mode 100644
index 1ba53926..00000000
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/VineRipper.java
+++ /dev/null
@@ -1,95 +0,0 @@
-package com.rarchives.ripme.ripper.rippers;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.json.JSONArray;
-import org.json.JSONObject;
-import org.jsoup.HttpStatusException;
-
-import com.rarchives.ripme.ripper.AlbumRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
-import com.rarchives.ripme.utils.Http;
-
-public class VineRipper extends AlbumRipper {
-
- private static final String DOMAIN = "vine.co",
- HOST = "vine";
-
- public VineRipper(URL url) throws IOException {
- super(url);
- }
-
- @Override
- public boolean canRip(URL url) {
- return url.getHost().endsWith(DOMAIN);
- }
-
- @Override
- public URL sanitizeURL(URL url) throws MalformedURLException {
- return new URL("http://vine.co/u/" + getGID(url));
- }
-
- @Override
- public void rip() throws IOException {
- int page = 0;
- String baseURL = "https://vine.co/api/timelines/users/" + getGID(this.url);
- JSONObject json = null;
- while (true) {
- page++;
- String theURL = baseURL;
- if (page > 1) {
- theURL += "?page=" + page;
- }
- try {
- logger.info(" Retrieving " + theURL);
- sendUpdate(STATUS.LOADING_RESOURCE, theURL);
- json = Http.url(theURL).getJSON();
- } catch (HttpStatusException e) {
- logger.debug("Hit end of pages at page " + page, e);
- break;
- }
- JSONArray records = json.getJSONObject("data").getJSONArray("records");
- for (int i = 0; i < records.length(); i++) {
- String videoURL = records.getJSONObject(i).getString("videoUrl");
- addURLToDownload(new URL(videoURL));
- if (isThisATest()) {
- break;
- }
- }
- if (isThisATest()) {
- break;
- }
- if (records.length() == 0) {
- logger.info("Zero records returned");
- break;
- }
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- logger.error("[!] Interrupted while waiting to load next page", e);
- break;
- }
- }
- waitForThreads();
- }
-
- @Override
- public String getHost() {
- return HOST;
- }
-
- @Override
- public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("^https?://(www\\.)?vine\\.co/u/([0-9]+).*$");
- Matcher m = p.matcher(url.toExternalForm());
- if (!m.matches()) {
- throw new MalformedURLException("Expected format: http://vine.co/u/######");
- }
- return m.group(m.groupCount());
- }
-
-}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java
index dbc44585..8b7b7b80 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java
@@ -44,7 +44,20 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
"freeadultcomix.com",
"thisis.delvecomic.com",
"tnbtu.com",
- "shipinbottle.pepsaga.com"
+ "shipinbottle.pepsaga.com",
+ "8muses.download",
+ "spyingwithlana.com"
+ );
+
+ private static List<String> theme1 = Arrays.asList(
+ "www.totempole666.com",
+ "buttsmithy.com",
+ "themonsterunderthebed.net",
+ "prismblush.com",
+ "www.konradokonski.com",
+ "thisis.delvecomic.com",
+ "tnbtu.com",
+ "spyingwithlana.com"
);
@Override
@@ -135,6 +148,18 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
if (shipinbottleMat.matches()) {
return true;
}
+
+ Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
+ Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
+ if (eight_musesMat.matches()) {
+ return true;
+ }
+
+ Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
+ Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
+ if (spyingwithlanaMat.matches()) {
+ return true;
+ }
}
@@ -209,6 +234,18 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
return getHost() + "_" + "Ship_in_bottle";
}
+ Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
+ Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
+ if (eight_musesMat.matches()) {
+ return getHost() + "_" + eight_musesMat.group(1);
+ }
+
+ Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
+ Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
+ if (spyingwithlanaMat.matches()) {
+ return "spyingwithlana_" + spyingwithlanaMat.group(1).replaceAll("-page-\\d", "");
+ }
+
return super.getAlbumTitle(url);
}
@@ -227,13 +264,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
// Find next page
String nextPage = "";
Element elem = null;
- if (getHost().contains("www.totempole666.com")
- || getHost().contains("buttsmithy.com")
- || getHost().contains("themonsterunderthebed.net")
- || getHost().contains("prismblush.com")
- || getHost().contains("www.konradokonski.com")
- || getHost().contains("thisis.delvecomic.com")
- || getHost().contains("tnbtu.com")) {
+ if (theme1.contains(getHost())) {
elem = doc.select("a.comic-nav-next").first();
if (elem == null) {
throw new IOException("No more pages");
@@ -247,7 +278,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
nextPage = elem.attr("href");
}
- if (nextPage == "") {
+ if (nextPage.equals("")) {
throw new IOException("No more pages");
} else {
return Http.url(nextPage).get();
@@ -257,13 +288,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
@Override
public List getURLsFromPage(Document doc) {
List result = new ArrayList<>();
- if (getHost().contains("www.totempole666.com")
- || getHost().contains("buttsmithy.com")
- || getHost().contains("themonsterunderthebed.net")
- || getHost().contains("prismblush.com")
- || getHost().contains("www.konradokonski.com")
- || getHost().contains("thisis.delvecomic.com")
- || getHost().contains("tnbtu.com")) {
+ if (theme1.contains(getHost())) {
Element elem = doc.select("div.comic-table > div#comic > a > img").first();
// If doc is the last page in the comic then elem.attr("src") returns null
// because there is no link to the next page
@@ -315,6 +340,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
}
}
+ if (url.toExternalForm().contains("8muses.download")) {
+ for (Element elem : doc.select("div.popup-gallery > figure > a")) {
+ result.add(elem.attr("href"));
+ }
+ }
+
return result;
}
@@ -327,8 +358,14 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|| getHost().contains("themonsterunderthebed.net")) {
addURLToDownload(url, pageTitle + "_");
}
- // If we're ripping a site where we can't get the page number/title we just rip normally
- addURLToDownload(url, getPrefix(index));
+ if (getHost().contains("tnbtu.com")) {
+ // We need to set the referrer header for tnbtu
+ addURLToDownload(url, getPrefix(index), "","http://www.tnbtu.com/comic", null);
+ } else {
+ // If we're ripping a site where we can't get the page number/title we just rip normally
+ addURLToDownload(url, getPrefix(index));
+ }
+
}
@Override
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
index 35fe56ff..b92aa9dd 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
@@ -72,7 +72,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
- if (doc.select("a.next").first().attr("href") != "") {
+ if (!doc.select("a.next").first().attr("href").equals("")) {
return Http.url(doc.select("a.next").first().attr("href")).get();
} else {
throw new IOException("No more pages");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
index 4f2bac97..6dde798d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
@@ -3,7 +3,6 @@ package com.rarchives.ripme.ripper.rippers.video;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
-import java.net.URLDecoder;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
diff --git a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java
index 01b6c7eb..11d18771 100644
--- a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java
+++ b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java
@@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Utils;
public class UpdateUtils {
private static final Logger logger = Logger.getLogger(UpdateUtils.class);
- private static final String DEFAULT_VERSION = "1.7.34";
+ private static final String DEFAULT_VERSION = "1.7.44";
private static final String REPO_NAME = "ripmeapp/ripme";
private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
private static final String mainFileName = "ripme.jar";
diff --git a/src/main/java/com/rarchives/ripme/utils/Proxy.java b/src/main/java/com/rarchives/ripme/utils/Proxy.java
new file mode 100644
index 00000000..be3c3b7e
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/utils/Proxy.java
@@ -0,0 +1,99 @@
+package com.rarchives.ripme.utils;
+
+import java.net.Authenticator;
+import java.net.PasswordAuthentication;
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Proxy/Socks setter
+ */
+public class Proxy {
+ private Proxy() {
+ }
+
+ /**
+ * Parse the proxy server settings from string, using the format
+ * [user:password]@host[:port].
+ *
+ * @param fullproxy the string to parse
+ * @return HashMap containing proxy server, port, user and password
+ */
+ private static Map<String, String> parseServer(String fullproxy) {
+ Map<String, String> proxy = new HashMap<String, String>();
+
+ if (fullproxy.lastIndexOf("@") != -1) {
+ int sservli = fullproxy.lastIndexOf("@");
+ String userpw = fullproxy.substring(0, sservli);
+ String[] usersplit = userpw.split(":");
+ proxy.put("user", usersplit[0]);
+ proxy.put("password", usersplit[1]);
+ fullproxy = fullproxy.substring(sservli + 1);
+ }
+ String[] servsplit = fullproxy.split(":");
+ if (servsplit.length == 2) {
+ proxy.put("port", servsplit[1]);
+ }
+ proxy.put("server", servsplit[0]);
+ return proxy;
+ }
+
+ /**
+ * Set a HTTP Proxy.
+ * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless
+ * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java
+ * see https://stackoverflow.com/q/41505219
+ *
+ * @param fullproxy the proxy, using format [user:password]@host[:port]
+ */
+ public static void setHTTPProxy(String fullproxy) {
+ Map<String, String> proxyServer = parseServer(fullproxy);
+
+ if (proxyServer.get("user") != null && proxyServer.get("password") != null) {
+ Authenticator.setDefault(new Authenticator(){
+ protected PasswordAuthentication getPasswordAuthentication(){
+ PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray());
+ return p;
+ }
+ });
+ System.setProperty("http.proxyUser", proxyServer.get("user"));
+ System.setProperty("http.proxyPassword", proxyServer.get("password"));
+ System.setProperty("https.proxyUser", proxyServer.get("user"));
+ System.setProperty("https.proxyPassword", proxyServer.get("password"));
+ }
+
+ if (proxyServer.get("port") != null) {
+ System.setProperty("http.proxyPort", proxyServer.get("port"));
+ System.setProperty("https.proxyPort", proxyServer.get("port"));
+ }
+
+ System.setProperty("http.proxyHost", proxyServer.get("server"));
+ System.setProperty("https.proxyHost", proxyServer.get("server"));
+ }
+
+ /**
+ * Set a Socks Proxy Server (globally).
+ *
+ * @param fullsocks the socks server, using format [user:password]@host[:port]
+ */
+ public static void setSocks(String fullsocks) {
+
+ Map<String, String> socksServer = parseServer(fullsocks);
+ if (socksServer.get("user") != null && socksServer.get("password") != null) {
+ Authenticator.setDefault(new Authenticator(){
+ protected PasswordAuthentication getPasswordAuthentication(){
+ PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray());
+ return p;
+ }
+ });
+ System.setProperty("java.net.socks.username", socksServer.get("user"));
+ System.setProperty("java.net.socks.password", socksServer.get("password"));
+ }
+ if (socksServer.get("port") != null) {
+ System.setProperty("socksProxyPort", socksServer.get("port"));
+ }
+
+ System.setProperty("socksProxyHost", socksServer.get("server"));
+ }
+
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java
new file mode 100644
index 00000000..f1d8eff5
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java
@@ -0,0 +1,30 @@
+package com.rarchives.ripme.tst;
+
+import com.rarchives.ripme.ripper.AbstractRipper;
+import junit.framework.TestCase;
+
+import java.io.IOException;
+import java.net.URL;
+
+
+
+public class AbstractRipperTest extends TestCase {
+
+ public void testGetFileName() throws IOException {
+ String fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", "test");
+ assertEquals("test.test", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", null);
+ assertEquals("test", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), null, null);
+ assertEquals("Object", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file.png"), null, null);
+ assertEquals("file.png", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file."), null, null);
+ assertEquals("file.", fileName);
+ }
+
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/proxyTest.java b/src/test/java/com/rarchives/ripme/tst/proxyTest.java
new file mode 100644
index 00000000..36ea2f55
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/proxyTest.java
@@ -0,0 +1,52 @@
+package com.rarchives.ripme.tst;
+
+import java.io.IOException;
+import java.net.URL;
+import com.rarchives.ripme.utils.Proxy;
+import com.rarchives.ripme.utils.Utils;
+import junit.framework.TestCase;
+import com.rarchives.ripme.utils.Http;
+
+
+public class proxyTest extends TestCase {
+
+
+ // This test will only run on machines where the user has added an entry for proxy.socks
+ public void testSocksProxy() throws IOException {
+ // Unset proxy before testing
+ System.setProperty("http.proxyHost", "");
+ System.setProperty("https.proxyHost", "");
+ System.setProperty("socksProxyHost", "");
+ URL url = new URL("https://icanhazip.com");
+ String proxyConfig = Utils.getConfigString("proxy.socks", "");
+ if (!proxyConfig.equals("")) {
+ String ip1 = Http.url(url).ignoreContentType().get().text();
+ Proxy.setSocks(Utils.getConfigString("proxy.socks", ""));
+ String ip2 = Http.url(url).ignoreContentType().get().text();
+ assertFalse(ip1.equals(ip2));
+ } else {
+ System.out.println("Skipping testSocksProxy");
+ assert(true);
+ }
+ }
+
+ // This test will only run on machines where the user has added an entry for proxy.http
+ public void testHTTPProxy() throws IOException {
+ // Unset proxy before testing
+ System.setProperty("http.proxyHost", "");
+ System.setProperty("https.proxyHost", "");
+ System.setProperty("socksProxyHost", "");
+ URL url = new URL("https://icanhazip.com");
+ String proxyConfig = Utils.getConfigString("proxy.http", "");
+ if (!proxyConfig.equals("")) {
+ String ip1 = Http.url(url).ignoreContentType().get().text();
+ Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", ""));
+ String ip2 = Http.url(url).ignoreContentType().get().text();
+ assertFalse(ip1.equals(ip2));
+ } else {
+ System.out.println("Skipping testHTTPProxy");
+ assert(true);
+ }
+ }
+
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java
index fb991ec7..503db2c3 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java
@@ -3,7 +3,7 @@ package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URL;
-import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;;
+import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;
public class AerisdiesRipperTest extends RippersTest {
public void testAlbum() throws IOException {
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java
new file mode 100644
index 00000000..8eb8d88f
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java
@@ -0,0 +1,18 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.DynastyscansRipper;
+
+public class DynastyscansRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
+ assertEquals("under_one_roof_ch01", ripper.getGID(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java
index 4a6c3539..e29c9ece 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java
@@ -17,4 +17,9 @@ public class EightmusesRipperTest extends RippersTest {
ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor"));
testRipper(ripper);
}
+
+ public void testGID() throws IOException {
+ EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"));
+ assertEquals("Affect3D-Comics", ripper.getGID(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java
new file mode 100644
index 00000000..6856eb06
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.GfycatporntubeRipper;
+
+public class GfycatporntubeRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/"));
+ testRipper(ripper);
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java
index 144606fc..b5765047 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java
@@ -9,5 +9,7 @@ public class Hentai2readRipperTest extends RippersTest {
public void testHentai2readAlbum() throws IOException {
Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/"));
testRipper(ripper);
+ ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/1/"));
+ testRipper(ripper);
}
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java
new file mode 100644
index 00000000..13c2798d
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java
@@ -0,0 +1,14 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.HitomiRipper;
+
+public class HitomiRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ HitomiRipper ripper = new HitomiRipper(new URL("https://hitomi.la/galleries/975973.html"));
+ testRipper(ripper);
+ assertTrue(ripper.getGID(new URL("https://hitomi.la/galleries/975973.html")).equals("975973"));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java
new file mode 100644
index 00000000..1d9ef4ad
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java
@@ -0,0 +1,25 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.HypnohubRipper;
+
+public class HypnohubRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
+ URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
+ HypnohubRipper ripper = new HypnohubRipper(poolURL);
+ testRipper(ripper);
+ ripper = new HypnohubRipper(postURL);
+ testRipper(ripper);
+ }
+ public void testGetGID() throws IOException {
+ URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
+ HypnohubRipper ripper = new HypnohubRipper(poolURL);
+ assertEquals("2303", ripper.getGID(poolURL));
+
+ URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
+ assertEquals("63464_black_hair-bracelet-collar-corruption-female_only-", ripper.getGID(postURL));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java
new file mode 100644
index 00000000..ca355a2c
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.ManganeloRipper;
+
+public class ManganeloRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ ManganeloRipper ripper = new ManganeloRipper(new URL("http://manganelo.com/manga/black_clover"));
+ testRipper(ripper);
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java
index 621d77c3..d2a9581b 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java
@@ -10,4 +10,9 @@ public class ModelmayhemRipperTest extends RippersTest {
ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
+ assertEquals("520206", ripper.getGID(new URL("https://www.modelmayhem.com/portfolio/520206/viewall")));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java
index 6714195d..b4d01032 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java
@@ -10,4 +10,15 @@ public class MyhentaicomicsRipperTest extends RippersTest {
MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales");
+ MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url);
+ // Test a comic
+ assertEquals("Nienna-Lost-Tales", ripper.getGID(url));
+ // Test a search
+ assertEquals("test", ripper.getGID(new URL("http://myhentaicomics.com/index.php/search?q=test")));
+ // Test a tag
+ assertEquals("2409", ripper.getGID(new URL("http://myhentaicomics.com/index.php/tag/2409/")));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java
new file mode 100644
index 00000000..3353eeb5
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java
@@ -0,0 +1,18 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.NudeGalsRipper;
+
+public class NudeGalsRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
+ assertEquals("5541", ripper.getGID( new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java
new file mode 100644
index 00000000..cffc807d
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java
@@ -0,0 +1,14 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.Rule34Ripper;
+
+public class Rule34RipperTest extends RippersTest {
+ public void testRule34Rip() throws IOException {
+ Rule34Ripper ripper = new Rule34Ripper(new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo"));
+ testRipper(ripper);
+ }
+
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java
new file mode 100644
index 00000000..949e715f
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.SmuttyRipper;
+
+public class SmuttyRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ SmuttyRipper ripper = new SmuttyRipper(new URL("https://smutty.com/user/QUIGON/"));
+ testRipper(ripper);
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineRipperTest.java
deleted file mode 100644
index 343a72b0..00000000
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineRipperTest.java
+++ /dev/null
@@ -1,16 +0,0 @@
-package com.rarchives.ripme.tst.ripper.rippers;
-
-import java.io.IOException;
-import java.net.URL;
-
-import com.rarchives.ripme.ripper.rippers.VineRipper;
-
-public class VineRipperTest extends RippersTest {
- // https://github.com/RipMeApp/ripme/issues/181
- /*
- public void testVineRip() throws IOException {
- VineRipper ripper = new VineRipper(new URL("https://vine.co/u/954440445776334848"));
- testRipper(ripper);
- }
- */
-}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java
index 2f7dbcf9..8879c561 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java
@@ -55,6 +55,7 @@ public class WordpressComicRipperTest extends RippersTest {
WordpressComicRipper ripper = new WordpressComicRipper(
new URL("http://www.konradokonski.com/sawdust/comic/get-up/"));
testRipper(ripper);
+
}
public void test_konradokonski_2() throws IOException {
@@ -63,6 +64,13 @@ public class WordpressComicRipperTest extends RippersTest {
testRipper(ripper);
}
+ public void test_konradokonski_getAlbumTitle() throws IOException {
+ URL url = new URL("http://www.konradokonski.com/sawdust/comic/get-up/");
+ WordpressComicRipper ripper = new WordpressComicRipper(url);
+ assertEquals("konradokonski.com_sawdust", ripper.getAlbumTitle(url));
+
+ }
+
/*
// https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
public void test_freeadultcomix() throws IOException {
@@ -83,6 +91,32 @@ public class WordpressComicRipperTest extends RippersTest {
new URL("http://tnbtu.com/comic/01-00/"));
testRipper(ripper);
}
+
+ public void test_Eightmuses_download() throws IOException {
+ WordpressComicRipper ripper = new WordpressComicRipper(
+ new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/"));
+ testRipper(ripper);
+ }
+
+ public void test_Eightmuses_getAlbumTitle() throws IOException {
+ URL url = new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/");
+ WordpressComicRipper ripper = new WordpressComicRipper(url);
+ assertEquals("8muses.download_lustomic-playkittens-josh-samuel-porn-comics-8-muses",
+ ripper.getAlbumTitle(url));
+ }
+
+ public void test_spyingwithlana_download() throws IOException {
+ WordpressComicRipper ripper = new WordpressComicRipper(
+ new URL("http://spyingwithlana.com/comic/the-big-hookup/"));
+ testRipper(ripper);
+ }
+
+ public void test_spyingwithlana_getAlbumTitle() throws IOException {
+ URL url = new URL("http://spyingwithlana.com/comic/the-big-hookup/");
+ WordpressComicRipper ripper = new WordpressComicRipper(url);
+ assertEquals("spyingwithlana_the-big-hookup", ripper.getAlbumTitle(url));
+ }
+
// https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
// public void test_pepsaga() throws IOException {
// WordpressComicRipper ripper = new WordpressComicRipper(