diff --git a/README.md b/README.md
index e56276c1..66ef3428 100644
--- a/README.md
+++ b/README.md
@@ -30,8 +30,6 @@ For information about running the `.jar` file, see [the How To Run wiki](https:/
## [Changelog](https://github.com/ripmeapp/ripme/blob/master/ripme.json) (ripme.json)
-## [Website](http://rip.rarchives.com/)
-
# Features
* Quickly downloads all images in an online album (see supported sites below)
diff --git a/pom.xml b/pom.xml
index 77bedc9c..1bd8dc0d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,7 +4,7 @@
com.rarchives.ripme
ripme
jar
- 1.7.27
+ 1.7.47
ripme
http://rip.rarchives.com
diff --git a/ripme.json b/ripme.json
index 5f47ad6b..2cc83cdc 100644
--- a/ripme.json
+++ b/ripme.json
@@ -1,6 +1,26 @@
{
- "latestVersion": "1.7.27",
+ "latestVersion": "1.7.47",
"changeList": [
+ "1.7.47: Added quick queue support for hentai2read ripper; Fixed instagram ripper; SankakuComplexRipper can now download from different subdomains; Added ripper for bato.to; Added quick queue support for 8muses.download; ",
+ "1.7.46: Fixed hentai2read ripper; Rewrote the myhentaicomics ripper to use the new getAlbumsToQueue func; Can now blacklist nhentai tags; SinnercomicsRipper no longer adds -page-01 to folder names; EightmusesRipper now adds file extension to filename; disabled test for twitch ripper",
+ "1.7.45: Fixed hentai2read ripper; ImageBam album fixed; Added various translations; TsuminoRipper no longer requires album name to download",
+ "1.7.44: Fixed instagram ripper regex",
+ "1.7.43: Fixed queryId regex in instagram ripper",
+ "1.7.42: Added user support to SmuttyRipper; Removed vine ripper; Fixed NudeGalsRipper; addURLToDownload improvements; Fixed Instagram ripper",
+ "1.7.41: Added support for spyingwithlana.com; Added ManganeloRipper; Added support for dynasty-scans.com",
+ "1.7.40: Added hypnohub.net ripper; Fixed rule34.xxx ripper; Tsumino Ripper now add .png to filenames",
+ "1.7.39: Added rule34.xxx ripper; Added Gfycatporntube.com ripper; Fixed AbstractRipper subdir bug; Added AbstractRipper unit tests",
+ "1.7.38: Added http and socks proxy support; Extended some unit tests to include getGid; Added HitomiRipper; hentaifoundry ripper now can rip all images from accounts",
+ "1.7.37: Minor code clean up; Added socks proxy support; Added support for 8muses.download; Hentaifoundry no longer errors when there are no more pages; Fix bug that causes tumblr to replace https with httpss when downloading resized images",
+ "1.7.36: Fixed Instagram ripper; Fixed hentai2read ripper test; Fixed tnbtu.com ripper",
+ "1.7.35: Fixed instagram ripper; hentai2read ripper now properly names folders",
+ "1.7.34: Added Blackbrickroadofoz Ripper; Fixed webtoons regex",
+ "1.7.33: Instagram ripper no longer errors out when downloading from more than 1 page",
+ "1.7.32: Instagram ripper update to use new endpoints",
+ "1.7.31: Instagram ripper no longer errors out when getting next page",
+ "1.7.30: Fixed usage of command-line on non-headless systems",
+ "1.7.29: Can now download single images from imgur; Improved handling of headless mode & OS-specific config; Added modelx ripper; Fixed eroshae ripper",
+ "1.7.28: IG ripper now uses display_url when downloading images; Reddit ripper now gets erome links; Hentaifoundry Ripper no longer errors out when there is no next page",
"1.7.27: IG ripper can now rip from tags; fixed json parsing issues",
"1.7.26: fixed instagram ripper",
"1.7.25: Fixed instagram ripper; Added an option to use short names for 8muses; Added tsuminoRipper; Added support for incase.buttsmithy.com",
diff --git a/src/main/java/com/rarchives/ripme/App.java b/src/main/java/com/rarchives/ripme/App.java
index 0bb5f3f6..d58f5d4d 100644
--- a/src/main/java/com/rarchives/ripme/App.java
+++ b/src/main/java/com/rarchives/ripme/App.java
@@ -1,5 +1,6 @@
package com.rarchives.ripme;
+import java.awt.*;
import java.io.File;
import java.io.IOException;
import java.io.BufferedReader;
@@ -18,6 +19,7 @@ import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
+import org.apache.commons.lang.SystemUtils;
import org.apache.log4j.Logger;
import com.rarchives.ripme.ripper.AbstractRipper;
@@ -25,48 +27,64 @@ import com.rarchives.ripme.ui.History;
import com.rarchives.ripme.ui.HistoryEntry;
import com.rarchives.ripme.ui.MainWindow;
import com.rarchives.ripme.ui.UpdateUtils;
+import com.rarchives.ripme.utils.Proxy;
import com.rarchives.ripme.utils.RipUtils;
import com.rarchives.ripme.utils.Utils;
/**
* Entry point to application.
+ * This is where all the fun happens, with the main method.
* Decides to display UI or to run silently via command-line.
+ *
+ * As the "controller" to all other classes, it parses command line parameters and loads the history.
*/
public class App {
- public static final Logger logger;
+ public static final Logger logger = Logger.getLogger(App.class);
private static final History HISTORY = new History();
- static {
- //initialize logger
- Utils.configureLogger();
- logger = Logger.getLogger(App.class);
- }
-
+ /**
+ * Where everything starts. Takes in, and tries to parse as many commandline arguments as possible.
+ * Otherwise, it launches a GUI.
+ *
+ * @param args Array of command line arguments.
+ */
public static void main(String[] args) throws MalformedURLException {
CommandLine cl = getArgs(args);
+
if (args.length > 0 && cl.hasOption('v')){
- logger.error(UpdateUtils.getThisJarVersion());
+ logger.info(UpdateUtils.getThisJarVersion());
System.exit(0);
}
- System.setProperty("apple.laf.useScreenMenuBar", "true");
- System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
- logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+ if (Utils.getConfigString("proxy.http", null) != null) {
+ Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", null));
+ } else if (Utils.getConfigString("proxy.socks", null) != null) {
+ Proxy.setSocks(Utils.getConfigString("proxy.socks", null));
+ }
- if (args.length > 0) {
- // CLI Mode
+ if (GraphicsEnvironment.isHeadless() || args.length > 0) {
handleArguments(args);
} else {
- // GUI Mode
+ if (SystemUtils.IS_OS_MAC_OSX) {
+ System.setProperty("apple.laf.useScreenMenuBar", "true");
+ System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
+ }
+
+ Utils.configureLogger();
+
+ logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+
MainWindow mw = new MainWindow();
SwingUtilities.invokeLater(mw);
}
}
+
/**
* Creates an abstract ripper and instructs it to rip.
* @param url URL to be ripped
- * @throws Exception
+ * @throws Exception Nothing too specific here, just a catch-all.
+ *
*/
private static void rip(URL url) throws Exception {
AbstractRipper ripper = AbstractRipper.getRipper(url);
@@ -80,20 +98,45 @@ public class App {
*/
private static void handleArguments(String[] args) {
CommandLine cl = getArgs(args);
- if (cl.hasOption('h')) {
+
+ //Help (list commands)
+ if (cl.hasOption('h') || args.length == 0) {
HelpFormatter hf = new HelpFormatter();
hf.printHelp("java -jar ripme.jar [OPTIONS]", getOptions());
System.exit(0);
}
+
+ Utils.configureLogger();
+ logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+
+ //Allow file overwriting
if (cl.hasOption('w')) {
Utils.setConfigBoolean("file.overwrite", true);
}
+
+ //SOCKS proxy server
+ if (cl.hasOption('s')) {
+ String sservfull = cl.getOptionValue('s').trim();
+ Proxy.setSocks(sservfull);
+ }
+
+ //HTTP proxy server
+ if (cl.hasOption('p')) {
+ String proxyserverfull = cl.getOptionValue('p').trim();
+ Proxy.setHTTPProxy(proxyserverfull);
+ }
+
+ //Number of threads
if (cl.hasOption('t')) {
Utils.setConfigInteger("threads.size", Integer.parseInt(cl.getOptionValue('t')));
}
+
+ //Ignore 404
if (cl.hasOption('4')) {
Utils.setConfigBoolean("errors.skip404", true);
}
+
+ //Re-rip all previous albums
if (cl.hasOption('r')) {
// Re-rip all via command-line
List history = Utils.getConfigList("download.history");
@@ -115,6 +158,8 @@ public class App {
// Exit
System.exit(0);
}
+
+ //Re-rip all selected albums
if (cl.hasOption('R')) {
loadHistory();
if (HISTORY.toList().isEmpty()) {
@@ -146,20 +191,30 @@ public class App {
System.exit(-1);
}
}
+
+ //Save the order of images in album
if (cl.hasOption('d')) {
Utils.setConfigBoolean("download.save_order", true);
}
+
+ //Don't save the order of images in album
if (cl.hasOption('D')) {
Utils.setConfigBoolean("download.save_order", false);
}
+
+ //If both -d and -D are specified, exit: the options are mutually exclusive.
if ((cl.hasOption('d'))&&(cl.hasOption('D'))) {
logger.error("\nCannot specify '-d' and '-D' simultaneously");
System.exit(-1);
}
+
+ //Destination directory
if (cl.hasOption('l')) {
// change the default rips directory
Utils.setConfigString("rips.directory", cl.getOptionValue('l'));
}
+
+ //Read URLs from File
if (cl.hasOption('f')) {
String filename = cl.getOptionValue('f');
try {
@@ -175,10 +230,13 @@ public class App {
logger.error("[!] Failed reading file containing list of URLs. Cannot continue.");
}
}
+
+ //The URL to rip.
if (cl.hasOption('u')) {
String url = cl.getOptionValue('u').trim();
ripURL(url, cl.hasOption("n"));
}
+
}
/**
@@ -226,6 +284,8 @@ public class App {
opts.addOption("n", "no-prop-file", false, "Do not create properties file.");
opts.addOption("f", "urls-file", true, "Rip URLs from a file.");
opts.addOption("v", "version", false, "Show current version");
+ opts.addOption("s", "socks-server", true, "Use socks server ([user:password]@host[:port])");
+ opts.addOption("p", "proxy-server", true, "Use HTTP Proxy server ([user:password]@host[:port])");
return opts;
}
@@ -244,7 +304,7 @@ public class App {
return null;
}
}
-
+
/**
* Loads history from history file into memory.
*/
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
index e0fd3548..2eefe873 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
@@ -11,6 +11,7 @@ import org.jsoup.nodes.Document;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Utils;
+import com.rarchives.ripme.ui.MainWindow;
/**
* Simplified ripper, designed for ripping from sites by parsing HTML.
@@ -53,12 +54,29 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
protected boolean hasDescriptionSupport() {
return false;
}
+
protected String[] getDescription(String url, Document page) throws IOException {
throw new IOException("getDescription not implemented"); // Do I do this or make an abstract function?
}
protected int descSleepTime() {
return 100;
}
+
+ protected List getAlbumsToQueue(Document doc) {
+ return null;
+ }
+
+ // If a page has Queue support then it has no images we want to download, just a list of urls we want to add to
+ // the queue
+ protected boolean hasQueueSupport() {
+ return false;
+ }
+
+ // Takes a url and checks if it is for a page of albums
+ protected boolean pageContainsAlbums(URL url) {
+ return false;
+ }
+
@Override
public void rip() throws IOException {
int index = 0;
@@ -67,6 +85,16 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
Document doc = getFirstPage();
+ if (hasQueueSupport() && pageContainsAlbums(this.url)) {
+ List urls = getAlbumsToQueue(doc);
+ for (String url : urls) {
+ MainWindow.addUrlToQueue(url);
+ }
+
+ // We set doc to null here so the while loop below this doesn't fire
+ doc = null;
+ }
+
while (doc != null) {
if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
sendUpdate(STATUS.DOWNLOAD_COMPLETE, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
index 6068ed18..20889495 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
@@ -192,7 +192,8 @@ public abstract class AbstractRipper
* True if downloaded successfully
* False if failed to download
*/
- protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies);
+ protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies,
+ Boolean getFileExtFromMIME);
/**
* Queues image to be downloaded and saved.
@@ -212,7 +213,7 @@ public abstract class AbstractRipper
* True if downloaded successfully
* False if failed to download
*/
- protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName) {
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension, Boolean getFileExtFromMIME) {
// Don't re-add the url if it was downloaded in a previous rip
if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
if (hasDownloadedURL(url.toExternalForm())) {
@@ -228,21 +229,7 @@ public abstract class AbstractRipper
return false;
}
logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
- String saveAs;
- if (fileName != null) {
- saveAs = fileName;
- // Get the extension of the file
- String extension = url.toExternalForm().substring(url.toExternalForm().lastIndexOf(".") + 1);
- saveAs = saveAs + "." + extension;
- } else {
- saveAs = url.toExternalForm();
- saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
- }
-
- if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
- if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
- if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
- if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
+ String saveAs = getFileName(url, fileName, extension);
File saveFileAs;
try {
if (!subdirectory.equals("")) {
@@ -271,7 +258,15 @@ public abstract class AbstractRipper
logger.debug("Unable to write URL history file");
}
}
- return addURLToDownload(url, saveFileAs, referrer, cookies);
+ return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME);
+ }
+
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension) {
+ return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, extension, false);
+ }
+
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName) {
+ return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, null);
}
/**
@@ -306,6 +301,35 @@ public abstract class AbstractRipper
return addURLToDownload(url, prefix, "");
}
+ public static String getFileName(URL url, String fileName, String extension) {
+ String saveAs;
+ if (fileName != null) {
+ saveAs = fileName;
+ } else {
+ saveAs = url.toExternalForm();
+ saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+ }
+ if (extension == null) {
+ // Get the extension of the file
+ String[] lastBitOfURL = url.toExternalForm().split("/");
+
+ String[] lastBit = lastBitOfURL[lastBitOfURL.length - 1].split(".");
+ if (lastBit.length != 0) {
+ extension = lastBit[lastBit.length - 1];
+ saveAs = saveAs + "." + extension;
+ }
+ }
+
+ if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
+ if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
+ if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
+ if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
+ if (extension != null) {
+ saveAs = saveAs + "." + extension;
+ }
+ return saveAs;
+ }
+
/**
* Waits for downloading threads to complete.
diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
index 1726343a..f700f012 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
@@ -50,7 +50,7 @@ public abstract class AlbumRipper extends AbstractRipper {
/**
* Queues multiple URLs of single images to download from a single Album URL
*/
- public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies) {
+ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) {
// Only download one file if this is a test.
if (super.isThisATest() &&
(itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) {
@@ -82,7 +82,7 @@ public abstract class AlbumRipper extends AbstractRipper {
}
else {
itemsPending.put(url, saveAs);
- DownloadFileThread dft = new DownloadFileThread(url, saveAs, this);
+ DownloadFileThread dft = new DownloadFileThread(url, saveAs, this, getFileExtFromMIME);
if (referrer != null) {
dft.setReferrer(referrer);
}
@@ -96,7 +96,7 @@ public abstract class AlbumRipper extends AbstractRipper {
@Override
public boolean addURLToDownload(URL url, File saveAs) {
- return addURLToDownload(url, saveAs, null, null);
+ return addURLToDownload(url, saveAs, null, null, false);
}
/**
diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
index c62d58a6..42dedffe 100644
--- a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
+++ b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
@@ -8,6 +8,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
+import java.net.URLConnection;
import java.util.HashMap;
import java.util.Map;
@@ -36,10 +37,11 @@ class DownloadFileThread extends Thread {
private String prettySaveAs;
private AbstractRipper observer;
private int retries;
+ private Boolean getFileExtFromMIME;
private final int TIMEOUT;
- public DownloadFileThread(URL url, File saveAs, AbstractRipper observer) {
+ public DownloadFileThread(URL url, File saveAs, AbstractRipper observer, Boolean getFileExtFromMIME) {
super();
this.url = url;
this.saveAs = saveAs;
@@ -47,6 +49,7 @@ class DownloadFileThread extends Thread {
this.observer = observer;
this.retries = Utils.getConfigInteger("download.retries", 1);
this.TIMEOUT = Utils.getConfigInteger("download.timeout", 60000);
+ this.getFileExtFromMIME = getFileExtFromMIME;
}
public void setReferrer(String referrer) {
@@ -143,9 +146,15 @@ class DownloadFileThread extends Thread {
observer.downloadErrored(url, "Imgur image is 404: " + url.toExternalForm());
return;
}
-
// Save file
bis = new BufferedInputStream(huc.getInputStream());
+
+ // Check if we should get the file ext from the MIME type
+ if (getFileExtFromMIME) {
+ String fileExt = URLConnection.guessContentTypeFromStream(bis).replaceAll("image/", "");
+ saveAs = new File(saveAs.toString() + "." + fileExt);
+ }
+
fos = new FileOutputStream(saveAs);
IOUtils.copy(bis, fos);
break; // Download successful: break out of infinite loop
diff --git a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
index 13008cd9..29200d5a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
@@ -10,6 +10,7 @@ import java.util.Map;
import com.rarchives.ripme.ui.RipStatusMessage;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Utils;
+import com.sun.org.apache.xpath.internal.operations.Bool;
public abstract class VideoRipper extends AbstractRipper {
@@ -70,7 +71,7 @@ public abstract class VideoRipper extends AbstractRipper {
}
@Override
- public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies) {
+ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) {
return addURLToDownload(url, saveAs);
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java
new file mode 100644
index 00000000..a3350e68
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java
@@ -0,0 +1,137 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONObject;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class BatoRipper extends AbstractHTMLRipper {
+
+ public BatoRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "bato";
+ }
+
+ @Override
+ public String getDomain() {
+ return "bato.to";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://bato.to/chapter/([\\d]+)/?");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ // As this is just for quick queue support it doesn't matter what this if returns
+ p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return "";
+ }
+ throw new MalformedURLException("Expected bato.to URL format: " +
+ "bato.to/chapter/ID - got " + url + " instead");
+ }
+
+ @Override
+ public boolean hasQueueSupport() {
+ return true;
+ }
+
+ @Override
+ public boolean pageContainsAlbums(URL url) {
+ Pattern p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public List getAlbumsToQueue(Document doc) {
+ List urlsToAddToQueue = new ArrayList<>();
+ for (Element elem : doc.select("div.main > div > a")) {
+ urlsToAddToQueue.add("https://" + getDomain() + elem.attr("href"));
+ }
+ return urlsToAddToQueue;
+ }
+
+ @Override
+ public String getAlbumTitle(URL url) throws MalformedURLException {
+ try {
+ // Attempt to use album title as GID
+ return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_");
+ } catch (IOException e) {
+ // Fall back to default album naming convention
+ logger.info("Unable to find title at " + url);
+ }
+ return super.getAlbumTitle(url);
+ }
+
+ @Override
+ public boolean canRip(URL url) {
+ Pattern p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return true;
+ }
+
+ p = Pattern.compile("https?://bato.to/chapter/([\\d]+)/?");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ for (Element script : doc.select("script")) {
+ if (script.data().contains("var images = ")) {
+ String s = script.data();
+ s = s.replaceAll("var seriesId = \\d+;", "");
+ s = s.replaceAll("var chapterId = \\d+;", "");
+ s = s.replaceAll("var pages = \\d+;", "");
+ s = s.replaceAll("var page = \\d+;", "");
+ s = s.replaceAll("var prevCha = null;", "");
+ s = s.replaceAll("var nextCha = \\.*;", "");
+ String json = s.replaceAll("var images = ", "").replaceAll(";", "");
+ logger.info(s);
+ JSONObject images = new JSONObject(json);
+ for (int i = 1; i < images.length() +1; i++) {
+ result.add(images.getString(Integer.toString(i)));
+ }
+
+ }
+ }
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java
new file mode 100644
index 00000000..cb5d4b14
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java
@@ -0,0 +1,76 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class BlackbrickroadofozRipper extends AbstractHTMLRipper {
+
+ public BlackbrickroadofozRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "blackbrickroadofoz";
+ }
+
+ @Override
+ public String getDomain() {
+ return "blackbrickroadofoz.com";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://www.blackbrickroadofoz.com/comic/([a-zA-Z0-9_-]*)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected blackbrickroadofoz URL format: " +
+ "www.blackbrickroadofoz.com/comic/PAGE - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public Document getNextPage(Document doc) throws IOException {
+ sleep(1000);
+ Element elem = doc.select("div[id=topnav] > nav.cc-nav > a.cc-next").first();
+ if (elem == null) {
+ throw new IOException("No more pages");
+ }
+ String nextPage = elem.attr("href");
+ return Http.url(nextPage).get();
+
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ Element elem = doc.select("div[id=cc-comicbody] > a > img[id=cc-comic]").first();
+ // The site doesn't return properly encoded urls we replace all spaces ( ) with %20
+ result.add(elem.attr("src").replaceAll(" ", "%20"));
+
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
index fb29171b..7ccf558c 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
@@ -59,7 +59,7 @@ public class CfakeRipper extends AbstractHTMLRipper {
String nextPage = elem.attr("href");
// Some times this returns a empty string
// This for stops that
- if (nextPage == "") {
+ if (nextPage.equals("")) {
return null;
}
else {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
index 4461188e..f7f9f393 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
@@ -1,5 +1,9 @@
package com.rarchives.ripme.ripper.rippers;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Base64;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
@@ -13,7 +17,6 @@ import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-
import org.jsoup.Connection.Method;
import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
@@ -22,11 +25,6 @@ import org.jsoup.nodes.Element;
import org.jsoup.safety.Whitelist;
import org.jsoup.select.Elements;
-import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.utils.Base64;
-import com.rarchives.ripme.utils.Http;
-import com.rarchives.ripme.utils.Utils;
-
public class DeviantartRipper extends AbstractHTMLRipper {
private static final int PAGE_SLEEP_TIME = 3000,
@@ -108,19 +106,46 @@ public class DeviantartRipper extends AbstractHTMLRipper {
throw new MalformedURLException("Expected URL format: http://username.deviantart.com/[/gallery/#####], got: " + url);
}
+ /**
+ * Gets first page.
+ * Will determine if login is supplied,
+ * if there is a login, then login and add that login cookies.
+ * Otherwise, just bypass the age gate with an anonymous flag.
+ * @return
+ * @throws IOException
+ */
@Override
public Document getFirstPage() throws IOException {
- // Login
- try {
- cookies = loginToDeviantart();
- } catch (Exception e) {
- logger.warn("Failed to login: ", e);
+
+ //Test to see if there is a login:
+ String username = Utils.getConfigString("deviantart.username", new String(Base64.decode("Z3JhYnB5")));
+ String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
+
+ if (username == null || password == null) {
+ logger.debug("No DeviantArt login provided.");
cookies.put("agegate_state","1"); // Bypasses the age gate
+ } else {
+ // Attempt Login
+ try {
+ cookies = loginToDeviantart();
+ } catch (IOException e) {
+ logger.warn("Failed to login: ", e);
+ cookies.put("agegate_state","1"); // Bypasses the age gate
+ }
}
+
+
return Http.url(this.url)
.cookies(cookies)
.get();
}
+
+ /**
+ *
+ * @param page
+ * @param id
+ * @return
+ */
private String jsonToImage(Document page, String id) {
Elements js = page.select("script[type=\"text/javascript\"]");
for (Element tag : js) {
@@ -444,4 +469,4 @@ public class DeviantartRipper extends AbstractHTMLRipper {
// We are logged in, save the cookies
return resp.cookies();
}
-}
+}
\ No newline at end of file
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java
new file mode 100644
index 00000000..37d3ad93
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java
@@ -0,0 +1,84 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONArray;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class DynastyscansRipper extends AbstractHTMLRipper {
+
+ public DynastyscansRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "dynasty-scans";
+ }
+
+ @Override
+ public String getDomain() {
+ return "dynasty-scans.com";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://dynasty-scans.com/chapters/([\\S]+)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected dynasty-scans URL format: " +
+ "dynasty-scans.com/chapters/ID - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public Document getNextPage(Document doc) throws IOException {
+ Element elem = doc.select("a[id=next_link]").first();
+ if (elem == null || elem.attr("href").equals("#")) {
+ throw new IOException("No more pages");
+ }
+ return Http.url("https://dynasty-scans.com" + elem.attr("href")).get();
+
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ String jsonText = null;
+ for (Element script : doc.select("script")) {
+ if (script.data().contains("var pages")) {
+ jsonText = script.data().replaceAll("var pages = ", "");
+ jsonText = jsonText.replaceAll("//", "");
+ }
+ }
+ JSONArray imageArray = new JSONArray(jsonText);
+ for (int i = 0; i < imageArray.length(); i++) {
+ result.add("https://dynasty-scans.com" + imageArray.getJSONObject(i).getString("image"));
+ }
+
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
index 77ca9102..3e06695b 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -127,9 +126,9 @@ public class EightmusesRipper extends AbstractHTMLRipper {
image = getFullSizeImage(imageHref);
URL imageUrl = new URL(image);
if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
- addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "");
+ addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "", null, true);
} else {
- addURLToDownload(imageUrl, getPrefixLong(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies);
+ addURLToDownload(imageUrl, getPrefixLong(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "", null, true);
}
// X is our page index
x++;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
index 30dcfd4f..737b8092 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
@@ -86,7 +86,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
throw new IOException("No more pages");
}
nextUrl = elem.attr("href");
- if (nextUrl == "") {
+ if (nextUrl.equals("")) {
throw new IOException("No more pages");
}
return Http.url("eroshae.com" + nextUrl).get();
@@ -119,7 +119,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
for (Element img : imgs) {
if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
- imageURL = "https:" + imageURL;
+ imageURL = imageURL;
URLs.add(imageURL);
}
}
@@ -129,7 +129,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
- URLs.add("https:" + videoURL);
+ URLs.add(videoURL);
}
}
// Profile videos
@@ -148,7 +148,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
- URLs.add("https:" + videoURL);
+ URLs.add(videoURL);
}
}
}
@@ -209,7 +209,6 @@ public class EroShareRipper extends AbstractHTMLRipper {
for (Element img : imgs) {
if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
- imageURL = "https:" + imageURL;
URLs.add(new URL(imageURL));
}
}
@@ -219,7 +218,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
- URLs.add(new URL("https:" + videoURL));
+ URLs.add(new URL(videoURL));
}
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
index 7e532943..93aedba2 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
@@ -332,7 +332,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
String[] fields = u.split("/");
String prefix = getPrefix(index) + fields[fields.length - 3];
File saveAs = new File(getWorkingDir() + File.separator + prefix + ".jpg");
- addURLToDownload(url, saveAs, "", null);
+ addURLToDownload(url, saveAs, "", null, false);
}
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
index 973796cf..68aa950a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
@@ -13,7 +13,6 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.rarchives.ripme.utils.Utils;
-import org.jsoup.Connection.Method;
import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
@@ -23,7 +22,6 @@ import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.DownloadThreadPool;
-import com.rarchives.ripme.utils.Base64;
import com.rarchives.ripme.utils.Http;
public class FuraffinityRipper extends AbstractHTMLRipper {
@@ -162,10 +160,6 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
if (!subdirectory.equals("")) {
subdirectory = File.separator + subdirectory;
}
- int o = url.toString().lastIndexOf('/')-1;
- String test = url.toString().substring(url.toString().lastIndexOf('/',o)+1);
- test = test.replace("/",""); // This is probably not the best way to do this.
- test = test.replace("\\",""); // CLOSE ENOUGH!
saveFileAs = new File(
workingDir.getCanonicalPath()
+ subdirectory
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java
new file mode 100644
index 00000000..504b89d6
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java
@@ -0,0 +1,61 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class GfycatporntubeRipper extends AbstractHTMLRipper {
+
+ public GfycatporntubeRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "gfycatporntube";
+ }
+
+ @Override
+ public String getDomain() {
+ return "gfycatporntube.com";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://gfycatporntube.com/([a-zA-Z1-9_-]*)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected gfycatporntube URL format: " +
+ "gfycatporntube.com/NAME - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ result.add(doc.select("source[id=mp4Source]").attr("src"));
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
index 6e76bbc6..e0dbff17 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
@@ -31,41 +31,68 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
return "hentai2read.com";
}
- @Override
- public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("https://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
- Matcher m = p.matcher(url.toExternalForm());
- if (m.matches()) {
- return m.group(1);
- }
- throw new MalformedURLException("Expected hentai2read.com URL format: " +
- "hbrowse.com/COMICID - got " + url + " instead");
+ @Override
+ public boolean hasQueueSupport() {
+ return true;
+ }
+
+ @Override
+ public boolean pageContainsAlbums(URL url) {
+ logger.info("Page contains albums");
+ Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
+ Matcher mat = pat.matcher(url.toExternalForm());
+ if (mat.matches()) {
+ return true;
}
+ return false;
+ }
+
+ @Override
+ public List getAlbumsToQueue(Document doc) {
+ List urlsToAddToQueue = new ArrayList<>();
+ for (Element elem : doc.select(".nav-chapters > li > div.media > a")) {
+ urlsToAddToQueue.add(elem.attr("href"));
+ }
+ return urlsToAddToQueue;
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/(\\d+)?/?");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1) + "_" + m.group(2);
+ }
+ throw new MalformedURLException("Expected hentai2read.com URL format: " +
+ "hentai2read.com/COMICID - got " + url + " instead");
+ }
@Override
public Document getFirstPage() throws IOException {
- Document tempDoc;
- // get the first page of the comic
- if (url.toExternalForm().substring(url.toExternalForm().length() - 1).equals("/")) {
- tempDoc = Http.url(url + "1").get();
- } else {
- tempDoc = Http.url(url + "/1").get();
- }
- for (Element el : tempDoc.select("ul.nav > li > a")) {
- if (el.attr("href").startsWith("https://hentai2read.com/thumbnails/")) {
- // Get the page with the thumbnails
- return Http.url(el.attr("href")).get();
+ String thumbnailLink;
+ try {
+ // If the page contains albums we want to load the main page
+ if (pageContainsAlbums(url)) {
+ return Http.url(url).get();
}
+ Document tempDoc;
+ tempDoc = Http.url(url).get();
+ // Get the thumbnail page so we can rip all images without loading every page in the comic
+ thumbnailLink = tempDoc.select("div.col-xs-12 > div.reader-controls > div.controls-block > button > a").attr("href");
+ if (!thumbnailLink.equals("")) {
+ return Http.url(thumbnailLink).get();
+ } else {
+ return Http.url(tempDoc.select("a[data-original-title=Thumbnails").attr("href")).get();
+ }
+ } catch (IOException e) {
+ throw new IOException("Unable to get first page");
}
- throw new IOException("Unable to get first page");
}
@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
- Document doc = getFirstPage();
- String title = doc.select("span[itemprop=title]").text();
- return getHost() + "_" + title;
+ return getHost() + "_" + getGID(url);
} catch (Exception e) {
// Fall back to default album naming convention
logger.warn("Failed to get album title from " + url, e);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
index 561c4249..8d953de1 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
@@ -10,6 +10,7 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.jsoup.Connection.Method;
import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
@@ -49,19 +50,61 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
@Override
public Document getFirstPage() throws IOException {
- Response resp = Http.url("http://www.hentai-foundry.com/").response();
- cookies = resp.cookies();
+ Response resp;
+ Document doc;
+
resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
- .referrer("http://www.hentai-foundry.com/")
- .cookies(cookies)
- .response();
+ .referrer("http://www.hentai-foundry.com/")
+ .cookies(cookies)
+ .response();
// The only cookie that seems to matter in getting around the age wall is the phpsession cookie
cookies.putAll(resp.cookies());
- sleep(500);
+
+ doc = resp.parse();
+ String csrf_token = doc.select("input[name=YII_CSRF_TOKEN]")
+ .first().attr("value");
+ if (csrf_token != null) {
+ Map data = new HashMap<>();
+ data.put("YII_CSRF_TOKEN" , csrf_token);
+ data.put("rating_nudity" , "3");
+ data.put("rating_violence" , "3");
+ data.put("rating_profanity", "3");
+ data.put("rating_racism" , "3");
+ data.put("rating_sex" , "3");
+ data.put("rating_spoilers" , "3");
+ data.put("rating_yaoi" , "1");
+ data.put("rating_yuri" , "1");
+ data.put("rating_teen" , "1");
+ data.put("rating_guro" , "1");
+ data.put("rating_furry" , "1");
+ data.put("rating_beast" , "1");
+ data.put("rating_male" , "1");
+ data.put("rating_female" , "1");
+ data.put("rating_futa" , "1");
+ data.put("rating_other" , "1");
+ data.put("rating_scat" , "1");
+ data.put("rating_incest" , "1");
+ data.put("rating_rape" , "1");
+ data.put("filter_media" , "A");
+ data.put("filter_order" , "date_new");
+ data.put("filter_type" , "0");
+
+ resp = Http.url("http://www.hentai-foundry.com/site/filters")
+ .referrer("http://www.hentai-foundry.com/")
+ .cookies(cookies)
+ .data(data)
+ .method(Method.POST)
+ .response();
+ cookies.putAll(resp.cookies());
+ }
+ else {
+ logger.info("unable to find csrf_token and set filter");
+ }
+
resp = Http.url(url)
- .referrer("http://www.hentai-foundry.com/")
- .cookies(cookies)
- .response();
+ .referrer("http://www.hentai-foundry.com/")
+ .cookies(cookies)
+ .response();
cookies.putAll(resp.cookies());
return resp.parse();
}
@@ -74,12 +117,16 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
}
Elements els = doc.select("li.next > a");
Element first = els.first();
- String nextURL = first.attr("href");
- nextURL = "http://www.hentai-foundry.com" + nextURL;
- return Http.url(nextURL)
- .referrer(url)
- .cookies(cookies)
- .get();
+ try {
+ String nextURL = first.attr("href");
+ nextURL = "http://www.hentai-foundry.com" + nextURL;
+ return Http.url(nextURL)
+ .referrer(url)
+ .cookies(cookies)
+ .get();
+ } catch (NullPointerException e) {
+ throw new IOException("No more pages");
+ }
}
@Override
@@ -97,13 +144,6 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
}
Document imagePage;
try {
- Response resp = Http.url("http://www.hentai-foundry.com/").response();
- cookies = resp.cookies();
- resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
- .referrer("http://www.hentai-foundry.com/")
- .cookies(cookies)
- .response();
- cookies.putAll(resp.cookies());
logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
new file mode 100644
index 00000000..3e92cc61
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
@@ -0,0 +1,73 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class HitomiRipper extends AbstractHTMLRipper {
+
+ String galleryId = "";
+
+ public HitomiRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "hitomi";
+ }
+
+ @Override
+ public String getDomain() {
+ return "hitomi.la";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https://hitomi.la/galleries/([\\d]+).html");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ galleryId = m.group(1);
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected hitomi URL format: " +
+ "https://hitomi.la/galleries/ID.html - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // if we go to /GALLERYID.js we get a nice json array of all images in the gallery
+ return Http.url(new URL(url.toExternalForm().replaceAll(".html", ".js"))).ignoreContentType().get();
+ }
+
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ String json = doc.text().replaceAll("var galleryinfo =", "");
+ logger.info(json);
+ JSONArray json_data = new JSONArray(json);
+ for (int i = 0; i < json_data.length(); i++) {
+ result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));
+ }
+
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
new file mode 100644
index 00000000..1eabefb9
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
@@ -0,0 +1,91 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import javax.print.Doc;
+
+public class HypnohubRipper extends AbstractHTMLRipper {
+
+ public HypnohubRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "hypnohub";
+ }
+
+ @Override
+ public String getDomain() {
+ return "hypnohub.net";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/([\\S]+)/?$");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1) + "_" + m.group(2);
+ }
+ throw new MalformedURLException("Expected cfake URL format: " +
+ "hypnohub.net/pool/show/ID - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ private String ripPost(String url) throws IOException {
+ logger.info(url);
+ Document doc = Http.url(url).get();
+ return "https:" + doc.select("img.image").attr("src");
+
+ }
+
+ private String ripPost(Document doc) {
+ logger.info(url);
+ return "https:" + doc.select("img.image").attr("src");
+
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ if (url.toExternalForm().contains("/pool")) {
+ for (Element el : doc.select("ul[id=post-list-posts] > li > div > a.thumb")) {
+ try {
+ result.add(ripPost("https://hypnohub.net" + el.attr("href")));
+ } catch (IOException e) {
+ return result;
+ }
+ }
+ } else if (url.toExternalForm().contains("/post")) {
+ result.add(ripPost(doc));
+ }
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
index 323ad1de..b33f5624 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
@@ -1,5 +1,9 @@
package com.rarchives.ripme.ripper.rippers;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
@@ -7,16 +11,10 @@ import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
-import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.ripper.DownloadThreadPool;
-import com.rarchives.ripme.utils.Http;
-import com.rarchives.ripme.utils.Utils;
-
public class ImagebamRipper extends AbstractHTMLRipper {
// Current HTML document
@@ -71,7 +69,7 @@ public class ImagebamRipper extends AbstractHTMLRipper {
public Document getNextPage(Document doc) throws IOException {
// Find next page
Elements hrefs = doc.select("a.pagination_current + a.pagination_link");
- if (hrefs.size() == 0) {
+ if (hrefs.isEmpty()) {
throw new IOException("No more pages");
}
String nextUrl = "http://www.imagebam.com" + hrefs.first().attr("href");
@@ -121,8 +119,8 @@ public class ImagebamRipper extends AbstractHTMLRipper {
* Handles case when site has IP-banned the user.
*/
private class ImagebamImageThread extends Thread {
- private URL url;
- private int index;
+ private URL url; //link to "image page"
+ private int index; //index in album
ImagebamImageThread(URL url, int index) {
super();
@@ -134,28 +132,43 @@ public class ImagebamRipper extends AbstractHTMLRipper {
public void run() {
fetchImage();
}
-
+
+ /**
+ * Rips useful image from "image page"
+ */
private void fetchImage() {
try {
Document doc = Http.url(url).get();
// Find image
- Elements images = doc.select(".image-container img");
- if (images.size() == 0) {
+ Elements metaTags = doc.getElementsByTag("meta");
+
+            String imgsrc = "";//initialized to the empty string so no NullPointerException can occur below.
+
+ for (Element metaTag: metaTags) {
+                    //the direct link to the image always seems to be linked in the <head> section of the html.
+ if (metaTag.attr("property").equals("og:image")) {
+ imgsrc = metaTag.attr("content");
+ logger.info("Found URL " + imgsrc);
+ break;//only one (useful) image possible for an "image page".
+ }
+ }
+
+                //for debugging, or in case something goes wrong.
+ if (imgsrc.isEmpty()) {
logger.warn("Image not found at " + this.url);
return;
}
- Element image = images.first();
- String imgsrc = image.attr("src");
- logger.info("Found URL " + imgsrc);
+
// Provide prefix and let the AbstractRipper "guess" the filename
String prefix = "";
if (Utils.getConfigBoolean("download.save_order", true)) {
prefix = String.format("%03d_", index);
}
+
addURLToDownload(new URL(imgsrc), prefix);
} catch (IOException e) {
logger.error("[!] Exception while loading/parsing " + this.url, e);
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
index b595d9e2..fe7937d3 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
@@ -36,6 +36,7 @@ public class ImgurRipper extends AlbumRipper {
USER,
USER_ALBUM,
USER_IMAGES,
+ SINGLE_IMAGE,
SERIES_OF_IMAGES,
SUBREDDIT
}
@@ -155,34 +156,48 @@ public class ImgurRipper extends AlbumRipper {
@Override
public void rip() throws IOException {
switch (albumType) {
- case ALBUM:
- // Fall-through
- case USER_ALBUM:
- logger.info("Album type is USER_ALBUM");
- // Don't call getAlbumTitle(this.url) with this
- // as it seems to cause the album to be downloaded to a subdir.
- ripAlbum(this.url);
- break;
- case SERIES_OF_IMAGES:
- logger.info("Album type is SERIES_OF_IMAGES");
- ripAlbum(this.url);
- break;
- case USER:
- logger.info("Album type is USER");
- ripUserAccount(url);
- break;
- case SUBREDDIT:
- logger.info("Album type is SUBREDDIT");
- ripSubreddit(url);
- break;
- case USER_IMAGES:
- logger.info("Album type is USER_IMAGES");
- ripUserImages(url);
- break;
+ case ALBUM:
+ // Fall-through
+ case USER_ALBUM:
+ logger.info("Album type is USER_ALBUM");
+ // Don't call getAlbumTitle(this.url) with this
+ // as it seems to cause the album to be downloaded to a subdir.
+ ripAlbum(this.url);
+ break;
+ case SERIES_OF_IMAGES:
+ logger.info("Album type is SERIES_OF_IMAGES");
+ ripAlbum(this.url);
+ break;
+ case SINGLE_IMAGE:
+ logger.info("Album type is SINGLE_IMAGE");
+ ripSingleImage(this.url);
+ break;
+ case USER:
+ logger.info("Album type is USER");
+ ripUserAccount(url);
+ break;
+ case SUBREDDIT:
+ logger.info("Album type is SUBREDDIT");
+ ripSubreddit(url);
+ break;
+ case USER_IMAGES:
+ logger.info("Album type is USER_IMAGES");
+ ripUserImages(url);
+ break;
}
waitForThreads();
}
+ private void ripSingleImage(URL url) throws IOException {
+ String strUrl = url.toExternalForm();
+ Document document = getDocument(strUrl);
+ Matcher m = getEmbeddedJsonMatcher(document);
+ if (m.matches()) {
+ JSONObject json = new JSONObject(m.group(1)).getJSONObject("image");
+ addURLToDownload(extractImageUrlFromJson(json), "");
+ }
+ }
+
private void ripAlbum(URL url) throws IOException {
ripAlbum(url, "");
}
@@ -257,38 +272,16 @@ public class ImgurRipper extends AlbumRipper {
strUrl += "/all";
}
logger.info(" Retrieving " + strUrl);
- Document doc = Jsoup.connect(strUrl)
- .userAgent(USER_AGENT)
- .timeout(10 * 1000)
- .maxBodySize(0)
- .get();
-
+ Document doc = getDocument(strUrl);
// Try to use embedded JSON to retrieve images
- Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL);
- Matcher m = p.matcher(doc.body().html());
+ Matcher m = getEmbeddedJsonMatcher(doc);
if (m.matches()) {
try {
- ImgurAlbum imgurAlbum = new ImgurAlbum(url);
JSONObject json = new JSONObject(m.group(1));
- JSONArray images = json.getJSONObject("image")
+ JSONArray jsonImages = json.getJSONObject("image")
.getJSONObject("album_images")
.getJSONArray("images");
- int imagesLength = images.length();
- for (int i = 0; i < imagesLength; i++) {
- JSONObject image = images.getJSONObject(i);
- String ext = image.getString("ext");
- if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
- ext = ".mp4";
- }
- URL imageURL = new URL(
- "http://i.imgur.com/"
- + image.getString("hash")
- + ext);
- ImgurImage imgurImage = new ImgurImage(imageURL);
- imgurImage.extension = ext;
- imgurAlbum.addImage(imgurImage);
- }
- return imgurAlbum;
+ return createImgurAlbumFromJsonArray(url, jsonImages);
} catch (JSONException e) {
logger.debug("Error while parsing JSON at " + url + ", continuing", e);
}
@@ -330,6 +323,44 @@ public class ImgurRipper extends AlbumRipper {
return imgurAlbum;
}
+ private static Matcher getEmbeddedJsonMatcher(Document doc) {
+ Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL);
+ return p.matcher(doc.body().html());
+ }
+
+ private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException {
+ ImgurAlbum imgurAlbum = new ImgurAlbum(url);
+ int imagesLength = jsonImages.length();
+ for (int i = 0; i < imagesLength; i++) {
+ JSONObject jsonImage = jsonImages.getJSONObject(i);
+ imgurAlbum.addImage(createImgurImageFromJson(jsonImage));
+ }
+ return imgurAlbum;
+ }
+
+ private static ImgurImage createImgurImageFromJson(JSONObject json) throws MalformedURLException {
+ return new ImgurImage(extractImageUrlFromJson(json));
+ }
+
+ private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException {
+ String ext = json.getString("ext");
+ if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
+ ext = ".mp4";
+ }
+ return new URL(
+ "http://i.imgur.com/"
+ + json.getString("hash")
+ + ext);
+ }
+
+ private static Document getDocument(String strUrl) throws IOException {
+ return Jsoup.connect(strUrl)
+ .userAgent(USER_AGENT)
+ .timeout(10 * 1000)
+ .maxBodySize(0)
+ .get();
+ }
+
/**
* Rips all albums in an imgur user's account.
* @param url
@@ -507,6 +538,13 @@ public class ImgurRipper extends AlbumRipper {
this.url = new URL("http://imgur.com/r/" + subreddit + "/" + gid);
return "r_" + subreddit + "_" + gid;
}
+ p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9]{5,})$");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ // Single imgur image
+ albumType = ALBUM_TYPE.SINGLE_IMAGE;
+ return m.group(m.groupCount());
+ }
p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9,]{5,}).*$");
m = p.matcher(url.toExternalForm());
if (m.matches()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
index bb2998f8..12227418 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
@@ -3,6 +3,7 @@ package com.rarchives.ripme.ripper.rippers;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
@@ -12,27 +13,38 @@ import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import java.security.*;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
-import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.utils.Http;
+import org.jsoup.Connection;
+import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ui.RipStatusMessage;
import com.rarchives.ripme.utils.Utils;
+import java.util.HashMap;
-public class InstagramRipper extends AbstractHTMLRipper {
+public class InstagramRipper extends AbstractJSONRipper {
String nextPageID = "";
private String qHash;
private boolean rippingTag = false;
private String tagName;
private String userID;
+ private String rhx_gis = null;
+ private String csrftoken;
+    // Ran into a weird issue with Jsoup cutting some JSON pages in half; this is a workaround
+ // see https://github.com/RipMeApp/ripme/issues/601
+ private String workAroundJsonString;
+
+
public InstagramRipper(URL url) throws IOException {
super(url);
@@ -65,11 +77,9 @@ public class InstagramRipper extends AbstractHTMLRipper {
return url.replaceAll("/[A-Z0-9]{8}/", "/");
}
- private List getPostsFromSinglePage(Document Doc) {
+ private List getPostsFromSinglePage(JSONObject json) {
List imageURLs = new ArrayList<>();
JSONArray datas;
- try {
- JSONObject json = getJSONFromPage(Doc);
if (json.getJSONObject("entry_data").getJSONArray("PostPage")
.getJSONObject(0).getJSONObject("graphql").getJSONObject("shortcode_media")
.has("edge_sidecar_to_children")) {
@@ -95,10 +105,6 @@ public class InstagramRipper extends AbstractHTMLRipper {
}
}
return imageURLs;
- } catch (IOException e) {
- logger.error("Unable to get JSON from page " + url.toExternalForm());
- return null;
- }
}
@Override
@@ -176,11 +182,14 @@ public class InstagramRipper extends AbstractHTMLRipper {
}
@Override
- public Document getFirstPage() throws IOException {
- Document p = Http.url(url).get();
+ public JSONObject getFirstPage() throws IOException {
+ Connection.Response resp = Http.url(url).response();
+ logger.info(resp.cookies());
+ csrftoken = resp.cookie("csrftoken");
+ Document p = resp.parse();
// Get the query hash so we can download the next page
qHash = getQHash(p);
- return p;
+ return getJSONFromPage(p);
}
private String getVideoFromPage(String videoID) {
@@ -224,16 +233,13 @@ public class InstagramRipper extends AbstractHTMLRipper {
}
@Override
- public List getURLsFromPage(Document doc) {
+ public List getURLsFromJSON(JSONObject json) {
List imageURLs = new ArrayList<>();
- JSONObject json = new JSONObject();
- try {
- json = getJSONFromPage(doc);
- } catch (IOException e) {
- logger.warn("Unable to exact json from page");
+
+ // get the rhx_gis value so we can get the next page later on
+ if (rhx_gis == null) {
+ rhx_gis = json.getString("rhx_gis");
}
-
-
if (!url.toExternalForm().contains("/p/")) {
JSONArray datas = new JSONArray();
if (!rippingTag) {
@@ -268,7 +274,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
if (data.getString("__typename").equals("GraphSidecar")) {
try {
Document slideShowDoc = Http.url(new URL("https://www.instagram.com/p/" + data.getString("shortcode"))).get();
- List toAdd = getPostsFromSinglePage(slideShowDoc);
+ List toAdd = getPostsFromSinglePage(getJSONFromPage(slideShowDoc));
for (int slideShowInt = 0; slideShowInt < toAdd.size(); slideShowInt++) {
addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode"));
}
@@ -284,9 +290,9 @@ public class InstagramRipper extends AbstractHTMLRipper {
if (imageURLs.size() == 0) {
// We add this one item to the array because either wise
// the ripper will error out because we returned an empty array
- imageURLs.add(getOriginalUrl(data.getString("thumbnail_src")));
+ imageURLs.add(getOriginalUrl(data.getString("display_url")));
}
- addURLToDownload(new URL(getOriginalUrl(data.getString("thumbnail_src"))), image_date);
+ addURLToDownload(new URL(data.getString("display_url")), image_date);
} else {
if (!Utils.getConfigBoolean("instagram.download_images_only", false)) {
addURLToDownload(new URL(getVideoFromPage(data.getString("shortcode"))), image_date);
@@ -307,23 +313,52 @@ public class InstagramRipper extends AbstractHTMLRipper {
} else { // We're ripping from a single page
logger.info("Ripping from single page");
- imageURLs = getPostsFromSinglePage(doc);
+ imageURLs = getPostsFromSinglePage(json);
}
return imageURLs;
}
+ private String getIGGis(String variables) {
+ String stringToMD5 = rhx_gis + ":" + variables;
+ logger.debug("String to md5 is \"" + stringToMD5 + "\"");
+ try {
+ byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
+
+ MessageDigest md = MessageDigest.getInstance("MD5");
+ byte[] hash = md.digest(bytesOfMessage);
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < hash.length; ++i) {
+ sb.append(Integer.toHexString((hash[i] & 0xFF) | 0x100).substring(1,3));
+ }
+ return sb.toString();
+ } catch(UnsupportedEncodingException e) {
+ return null;
+ } catch(NoSuchAlgorithmException e) {
+ return null;
+ }
+ }
+
@Override
- public Document getNextPage(Document doc) throws IOException {
- Document toreturn;
+ public JSONObject getNextPage(JSONObject json) throws IOException {
+ JSONObject toreturn;
+ java.util.Map cookies = new HashMap();
+// This shouldn't be hardcoded and will break one day
+ cookies.put("ig_pr", "1");
+ cookies.put("csrftoken", csrftoken);
if (!nextPageID.equals("") && !isThisATest()) {
if (rippingTag) {
try {
sleep(2500);
- toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
- "&variables={\"tag_name\":\"" + tagName + "\",\"first\":4,\"after\":\"" + nextPageID + "\"}").ignoreContentType().get();
+ String vars = "{\"tag_name\":\"" + tagName + "\",\"first\":4,\"after\":\"" + nextPageID + "\"}";
+ String ig_gis = getIGGis(vars);
+ toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
+ "&variables=" + vars, ig_gis);
// Sleep for a while to avoid a ban
- logger.info(toreturn.html());
+ logger.info(toreturn);
+ if (!pageHasImages(toreturn)) {
+ throw new IOException("No more pages");
+ }
return toreturn;
} catch (IOException e) {
@@ -334,8 +369,11 @@ public class InstagramRipper extends AbstractHTMLRipper {
try {
// Sleep for a while to avoid a ban
sleep(2500);
- toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" +
- "{\"id\":\"" + userID + "\",\"first\":100,\"after\":\"" + nextPageID + "\"}").ignoreContentType().get();
+ String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
+ String ig_gis = getIGGis(vars);
+ logger.info(ig_gis);
+
+ toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis);
if (!pageHasImages(toreturn)) {
throw new IOException("No more pages");
}
@@ -353,9 +391,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
addURLToDownload(url);
}
- private boolean pageHasImages(Document doc) {
- logger.info("BAD DATA: " + stripHTMLTags(doc.html()));
- JSONObject json = new JSONObject(stripHTMLTags(doc.html()));
+ private boolean pageHasImages(JSONObject json) {
int numberOfImages = json.getJSONObject("data").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges").length();
if (numberOfImages == 0) {
@@ -364,6 +400,34 @@ public class InstagramRipper extends AbstractHTMLRipper {
return true;
}
+ private JSONObject getPage(String url, String ig_gis) {
+ StringBuilder sb = new StringBuilder();
+ try {
+ // We can't use Jsoup here because it won't download a non-html file larger than a MB
+ // even if you set maxBodySize to 0
+ URLConnection connection = new URL(url).openConnection();
+ connection.setRequestProperty("User-Agent", USER_AGENT);
+ connection.setRequestProperty("x-instagram-gis", ig_gis);
+ BufferedReader in = new BufferedReader(new InputStreamReader(connection.getInputStream()));
+ String line;
+ while ((line = in.readLine()) != null) {
+ sb.append(line);
+
+ }
+ in.close();
+ workAroundJsonString = sb.toString();
+ return new JSONObject(sb.toString());
+
+ } catch (MalformedURLException e) {
+ logger.info("Unable to get query_hash, " + url + " is a malformed URL");
+ return null;
+ } catch (IOException e) {
+ logger.info("Unable to get query_hash");
+ logger.info(e.getMessage());
+ return null;
+ }
+ }
+
private String getQHash(Document doc) {
String jsFileURL = "https://www.instagram.com" + doc.select("link[rel=preload]").attr("href");
StringBuilder sb = new StringBuilder();
@@ -393,6 +457,12 @@ public class InstagramRipper extends AbstractHTMLRipper {
if (m.find()) {
return m.group(1);
}
+ jsP = Pattern.compile("n.pagination:n},queryId:.([a-zA-Z0-9]+).");
+ m = jsP.matcher(sb.toString());
+ if (m.find()) {
+ return m.group(1);
+ }
+
} else {
Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
Matcher m = jsP.matcher(sb.toString());
@@ -400,7 +470,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
return m.group(1);
}
}
- logger.info("Could not find query_hash on " + jsFileURL);
+ logger.error("Could not find query_hash on " + jsFileURL);
return null;
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
index c98dac0c..376d1292 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
@@ -50,7 +50,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
// This is here for pages with mp4s instead of images
String video_image = "";
video_image = page.select("div > video > source").attr("src");
- if (video_image != "") {
+ if (!video_image.equals("")) {
urls.add(video_image);
}
return urls;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java
new file mode 100644
index 00000000..8f8f8e68
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java
@@ -0,0 +1,116 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import javax.print.Doc;
+
+public class ManganeloRipper extends AbstractHTMLRipper {
+
+ public ManganeloRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "manganelo";
+ }
+
+ @Override
+ public String getDomain() {
+ return "manganelo.com";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://manganelo.com/manga/([\\S]+)/?$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+
+ p = Pattern.compile("http://manganelo.com/chapter/([\\S]+)/([\\S]+)/?$");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected manganelo URL format: " +
+ "/manganelo.com/manga/ID - got " + url + " instead");
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ @Override
+ public Document getNextPage(Document doc) throws IOException {
+ Element elem = doc.select("div.btn-navigation-chap > a.back").first();
+ if (elem == null) {
+ throw new IOException("No more pages");
+ } else {
+ return Http.url(elem.attr("href")).get();
+ }
+ }
+
+ private List getURLsFromChap(String url) {
+ logger.debug("Getting urls from " + url);
+ List result = new ArrayList<>();
+ try {
+ Document doc = Http.url(url).get();
+ for (Element el : doc.select("img.img_content")) {
+ result.add(el.attr("src"));
+ }
+ return result;
+ } catch (IOException e) {
+ return null;
+ }
+
+ }
+
+ private List getURLsFromChap(Document doc) {
+ logger.debug("Getting urls from " + url);
+ List result = new ArrayList<>();
+ for (Element el : doc.select("img.img_content")) {
+ result.add(el.attr("src"));
+ }
+ return result;
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ List urlsToGrab = new ArrayList<>();
+ if (url.toExternalForm().contains("/manga/")) {
+ for (Element el : doc.select("div.chapter-list > div.row > span > a")) {
+ urlsToGrab.add(el.attr("href"));
+ }
+ Collections.reverse(urlsToGrab);
+
+ for (String url : urlsToGrab) {
+ result.addAll(getURLsFromChap(url));
+ }
+ } else if (url.toExternalForm().contains("/chapter/")) {
+ result.addAll(getURLsFromChap(doc));
+ }
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java
new file mode 100644
index 00000000..0b513b37
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java
@@ -0,0 +1,64 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class ModelxRipper extends AbstractHTMLRipper {
+
+ public ModelxRipper(URL url) throws IOException {
+ super(url);
+ }
+
+ @Override
+ public String getHost() {
+ return "modelx";
+ }
+
+ @Override
+ public String getDomain() {
+ return "modelx.org";
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("^.*modelx.org/.*/(.+)$");
+ Matcher m = p.matcher(url.toExternalForm());
+
+ if (m.matches()) {
+ return m.group(1);
+ }
+
+ throw new MalformedURLException("Expected URL format: http://www.modelx.org/[category (one or more)]/xxxxx got: " + url);
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ return Http.url(url).get();
+ }
+
+ @Override
+ public List getURLsFromPage(Document page) {
+ List result = new ArrayList<>();
+
+ for (Element el : page.select(".gallery-icon > a")) {
+ result.add(el.attr("href"));
+ }
+
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
index 21942a47..60c90ae1 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
@@ -34,21 +34,18 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Pattern p = Pattern.compile("^https?://myhentaicomics.com/index.php/([a-zA-Z0-9-]*)/?$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
- isTag = false;
return m.group(1);
}
Pattern pa = Pattern.compile("^https?://myhentaicomics.com/index.php/search\\?q=([a-zA-Z0-9-]*)([a-zA-Z0-9=&]*)?$");
Matcher ma = pa.matcher(url.toExternalForm());
if (ma.matches()) {
- isTag = true;
return ma.group(1);
}
Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");
Matcher mat = pat.matcher(url.toExternalForm());
if (mat.matches()) {
- isTag = true;
return mat.group(1);
}
@@ -56,6 +53,37 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
"myhentaicomics.com/index.php/albumName - got " + url + " instead");
}
+ @Override
+ public boolean hasQueueSupport() {
+ return true;
+ }
+
+ @Override
+ public boolean pageContainsAlbums(URL url) {
+ Pattern pa = Pattern.compile("^https?://myhentaicomics.com/index.php/search\\?q=([a-zA-Z0-9-]*)([a-zA-Z0-9=&]*)?$");
+ Matcher ma = pa.matcher(url.toExternalForm());
+ if (ma.matches()) {
+ return true;
+ }
+
+ Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");
+ Matcher mat = pat.matcher(url.toExternalForm());
+ if (mat.matches()) {
+ isTag = true;
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public List getAlbumsToQueue(Document doc) {
+ List urlsToAddToQueue = new ArrayList<>();
+ for (Element elem : doc.select(".g-album > a")) {
+ urlsToAddToQueue.add(getDomain() + elem.attr("href"));
+ }
+ return urlsToAddToQueue;
+ }
+
@Override
public Document getFirstPage() throws IOException {
// "url" is an instance field of the superclass
@@ -73,7 +101,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
if (m.matches()) {
nextUrl = "http://myhentaicomics.com" + m.group(0);
}
- if (nextUrl == "") {
+ if (nextUrl.equals("")) {
throw new IOException("No more pages");
}
// Sleep for half a sec to avoid getting IP banned
@@ -81,161 +109,11 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
return Http.url(nextUrl).get();
}
- // This replaces getNextPage when downloading from searchs and tags
- private List getNextAlbumPage(String pageUrl) {
- List albumPagesList = new ArrayList<>();
- int pageNumber = 1;
- albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
- while (true) {
- String urlToGet = "http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber);
- Document nextAlbumPage;
- try {
- logger.info("Grabbing " + urlToGet);
- nextAlbumPage = Http.url(urlToGet).get();
- } catch (IOException e) {
- logger.warn("Failed to log link in Jsoup");
- nextAlbumPage = null;
- e.printStackTrace();
- }
- Element elem = nextAlbumPage.select("a.ui-icon-right").first();
- String nextPage = elem.attr("href");
- pageNumber = pageNumber + 1;
- if (nextPage == "") {
- logger.info("Got " + pageNumber + " pages");
- break;
- }
- else {
- logger.info(nextPage);
- albumPagesList.add(nextPage);
- logger.info("Adding " + nextPage);
- }
- }
- return albumPagesList;
- }
- private List getAlbumsFromPage(String url) {
- List pagesToRip;
- List result = new ArrayList<>();
- logger.info("Running getAlbumsFromPage");
- Document doc;
- try {
- doc = Http.url("http://myhentaicomics.com" + url).get();
- } catch (IOException e) {
- logger.warn("Failed to log link in Jsoup");
- doc = null;
- e.printStackTrace();
- }
- // This for goes over every album on the page
- for (Element elem : doc.select("li.g-album > a")) {
- String link = elem.attr("href");
- logger.info("Grabbing album " + link);
- pagesToRip = getNextAlbumPage(link);
- logger.info(pagesToRip);
- for (String element : pagesToRip) {
- Document album_doc;
- try {
- logger.info("grabbing " + element + " with jsoup");
- boolean startsWithHttp = element.startsWith("http://");
- if (!startsWithHttp) {
- album_doc = Http.url("http://myhentaicomics.com/" + element).get();
- }
- else {
- album_doc = Http.url(element).get();
- }
- } catch (IOException e) {
- logger.warn("Failed to log link in Jsoup");
- album_doc = null;
- e.printStackTrace();
- }
- for (Element el :album_doc.select("img")) {
- String imageSource = el.attr("src");
- // This bool is here so we don't try and download the site logo
- if (!imageSource.startsWith("http://")) {
- // We replace thumbs with resizes so we can the full sized images
- imageSource = imageSource.replace("thumbs", "resizes");
- String url_string = "http://myhentaicomics.com/" + imageSource;
- url_string = url_string.replace("%20", "_");
- url_string = url_string.replace("%27", "");
- url_string = url_string.replace("%28", "_");
- url_string = url_string.replace("%29", "_");
- url_string = url_string.replace("%2C", "_");
- if (isTag) {
- logger.info("Downloading from a tag or search");
- try {
- sleep(500);
- result.add("http://myhentaicomics.com/" + imageSource);
- addURLToDownload(new URL("http://myhentaicomics.com/" + imageSource), "", url_string.split("/")[6]);
- }
- catch (MalformedURLException e) {
- logger.warn("Malformed URL");
- e.printStackTrace();
- }
- }
- }
- }
- }
- }
- return result;
- }
-
- private List getListOfPages(Document doc) {
- List pages = new ArrayList<>();
- // Get the link from the last button
- String nextPageUrl = doc.select("a.ui-icon-right").last().attr("href");
- Pattern pat = Pattern.compile("/index\\.php/tag/[0-9]*/[a-zA-Z0-9_\\-:+]*\\?page=(\\d+)");
- Matcher mat = pat.matcher(nextPageUrl);
- if (mat.matches()) {
- logger.debug("Getting pages from a tag");
- String base_link = mat.group(0).replaceAll("\\?page=\\d+", "");
- logger.debug("base_link is " + base_link);
- int numOfPages = Integer.parseInt(mat.group(1));
- for (int x = 1; x != numOfPages +1; x++) {
- logger.debug("running loop");
- String link = base_link + "?page=" + Integer.toString(x);
- pages.add(link);
- }
- } else {
- Pattern pa = Pattern.compile("/index\\.php/search\\?q=[a-zA-Z0-9_\\-:]*&page=(\\d+)");
- Matcher ma = pa.matcher(nextPageUrl);
- if (ma.matches()) {
- logger.debug("Getting pages from a search");
- String base_link = ma.group(0).replaceAll("page=\\d+", "");
- logger.debug("base_link is " + base_link);
- int numOfPages = Integer.parseInt(ma.group(1));
- for (int x = 1; x != numOfPages +1; x++) {
- logger.debug("running loop");
- String link = base_link + "page=" + Integer.toString(x);
- logger.debug(link);
- pages.add(link);
- }
- }
- }
- return pages;
- }
@Override
public List getURLsFromPage(Document doc) {
List result = new ArrayList<>();
- // Checks if this is a comic page or a page of albums
- // If true the page is a page of albums
- if (doc.toString().contains("class=\"g-item g-album\"")) {
- // This if checks that there is more than 1 page
- if (doc.select("a.ui-icon-right").last().attr("href") != "") {
- // There is more than one page so we call getListOfPages
- List pagesToRip = getListOfPages(doc);
- logger.debug("Pages to rip = " + pagesToRip);
- for (String url : pagesToRip) {
- logger.debug("Getting albums from " + url);
- result = getAlbumsFromPage(url);
- }
- } else {
- logger.debug("There is only one page on this page of albums");
- // There is only 1 page so we call getAlbumsFromPage and pass it the page url
- result = getAlbumsFromPage(doc.select("div.g-description > a").attr("href"));
- }
- return result;
- }
- else {
for (Element el : doc.select("img")) {
String imageSource = el.attr("src");
// This bool is here so we don't try and download the site logo
@@ -245,7 +123,6 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
result.add("http://myhentaicomics.com/" + imageSource);
}
}
- }
return result;
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
index ac8f782d..9c204a8d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
@@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.Utils;
import org.jsoup.nodes.Document;
@@ -64,6 +65,39 @@ public class NhentaiRipper extends AbstractHTMLRipper {
return "nhentai" + title;
}
+ private List getTags(Document doc) {
+ List tags = new ArrayList<>();
+ for (Element tag : doc.select("a.tag")) {
+ tags.add(tag.attr("href").replaceAll("/tag/", "").replaceAll("/", ""));
+ }
+ return tags;
+ }
+
+ /**
+ * Checks for blacklisted tags on page. If it finds one it returns it, if not it returns null
+ *
+ * @param doc
+ * @return String
+ */
+ public String checkTags(Document doc, String[] blackListedTags) {
+ // If the user hasn't blacklisted any tags we return null;
+ if (blackListedTags == null) {
+ return null;
+ }
+ logger.info("Blacklisted tags " + blackListedTags[0]);
+ List tagsOnPage = getTags(doc);
+ for (String tag : blackListedTags) {
+ for (String pageTag : tagsOnPage) {
+ // We replace all dashes in the tag with spaces because the tags we get from the site are separated using
+ // dashes
+ if (tag.trim().toLowerCase().equals(pageTag.replaceAll("-", " ").toLowerCase())) {
+ return tag;
+ }
+ }
+ }
+ return null;
+ }
+
@Override
public String getGID(URL url) throws MalformedURLException {
// Ex: https://nhentai.net/g/159174/
@@ -82,6 +116,13 @@ public class NhentaiRipper extends AbstractHTMLRipper {
if (firstPage == null) {
firstPage = Http.url(url).get();
}
+
+ String blacklistedTag = checkTags(firstPage, Utils.getConfigStringArray("nhentai.blacklist.tags"));
+ if (blacklistedTag != null) {
+ sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " +
+ "contains the blacklisted tag \"" + blacklistedTag + "\"");
+ return null;
+ }
return firstPage;
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java
index d3cb0ab1..3300da50 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java
@@ -33,23 +33,6 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
return "nude-gals.com";
}
- public String getAlbumTitle(URL url) throws MalformedURLException {
- try {
- Document doc = getFirstPage();
- Elements elems = doc.select("#left_col > #grid_title > .right");
-
- String girl = elems.get(3).text();
- String magazine = elems.get(2).text();
- String title = elems.get(0).text();
-
- return getHost() + "_" + girl + "-" + magazine + "-" + title;
- } catch (Exception e) {
- // Fall back to default album naming convention
- logger.warn("Failed to get album title from " + url, e);
- }
- return super.getAlbumTitle(url);
- }
-
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p;
@@ -79,9 +62,9 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
public List getURLsFromPage(Document doc) {
List imageURLs = new ArrayList<>();
- Elements thumbs = doc.select("#grid_container .grid > .grid_box");
+ Elements thumbs = doc.select("img.thumbnail");
for (Element thumb : thumbs) {
- String link = thumb.select("a").get(1).attr("href");
+ String link = thumb.attr("src").replaceAll("thumbs/th_", "");
String imgSrc = "http://nude-gals.com/" + link;
imageURLs.add(imgSrc);
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java
index 52e9a6d2..e8798476 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java
@@ -4,10 +4,13 @@ import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
+import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import com.rarchives.ripme.ripper.AbstractRipper;
+import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONTokener;
@@ -17,6 +20,9 @@ import com.rarchives.ripme.ui.UpdateUtils;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.RipUtils;
import com.rarchives.ripme.utils.Utils;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
public class RedditRipper extends AlbumRipper {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java
new file mode 100644
index 00000000..b7e0f7b0
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java
@@ -0,0 +1,94 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class Rule34Ripper extends AbstractHTMLRipper {
+
+ public Rule34Ripper(URL url) throws IOException {
+ super(url);
+ }
+
+ private String apiUrl;
+ private int pageNumber = 0;
+
+ @Override
+ public String getHost() {
+ return "rule34";
+ }
+
+ @Override
+ public String getDomain() {
+ return "rule34.xxx";
+ }
+
+ @Override
+ public boolean canRip(URL url){
+ Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public String getGID(URL url) throws MalformedURLException {
+ Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
+ throw new MalformedURLException("Expected rule34.xxx URL format: " +
+ "rule34.xxx/index.php?page=post&s=list&tags=TAG - got " + url + " instead");
+ }
+
+ public URL getAPIUrl() throws MalformedURLException {
+ URL urlToReturn = new URL("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url));
+ return urlToReturn;
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ apiUrl = getAPIUrl().toExternalForm();
+ // "url" is an instance field of the superclass
+ return Http.url(getAPIUrl()).get();
+ }
+
+ @Override
+ public Document getNextPage(Document doc) throws IOException {
+ if (doc.html().contains("Search error: API limited due to abuse")) {
+ throw new IOException("No more pages");
+ }
+ pageNumber += 1;
+ String nextPage = apiUrl + "&pid=" + pageNumber;
+ return Http.url(nextPage).get();
+ }
+
+ @Override
+ public List getURLsFromPage(Document doc) {
+ List result = new ArrayList<>();
+ for (Element el : doc.select("posts > post")) {
+ String imageSource = el.select("post").attr("file_url");
+ result.add(imageSource);
+
+ }
+ return result;
+ }
+
+ @Override
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java
index c6440bb8..d83d5930 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java
@@ -43,7 +43,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
try {
- return URLDecoder.decode(m.group(2), "UTF-8");
+ return URLDecoder.decode(m.group(1) + "_" + m.group(2), "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new MalformedURLException("Cannot decode tag name '" + m.group(1) + "'");
}
@@ -53,6 +53,20 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
url + "instead");
}
+ public String getSubDomain(URL url){
+ Pattern p = Pattern.compile("^https?://([a-zA-Z0-9]+\\.)?sankakucomplex\\.com/.*tags=([^&]+).*$");
+ Matcher m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ try {
+ return URLDecoder.decode(m.group(1), "UTF-8");
+ } catch (UnsupportedEncodingException e) {
+ return null;
+ }
+ }
+ return null;
+
+ }
+
@Override
public Document getFirstPage() throws IOException {
if (albumDoc == null) {
@@ -71,9 +85,11 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
for (Element thumbSpan : doc.select("div.content > div > span.thumb > a")) {
String postLink = thumbSpan.attr("href");
try {
+ String subDomain = getSubDomain(url);
+ String siteURL = "https://" + subDomain + "sankakucomplex.com";
// Get the page the full sized image is on
- Document subPage = Http.url("https://chan.sankakucomplex.com" + postLink).get();
- logger.info("Checking page " + "https://chan.sankakucomplex.com" + postLink);
+ Document subPage = Http.url(siteURL + postLink).get();
+ logger.info("Checking page " + siteURL + postLink);
imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href"));
} catch (IOException e) {
logger.warn("Error while loading page " + postLink, e);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
index d30e9b63..9de3d2ae 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java
@@ -57,7 +57,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
String nextPage = elem.parent().attr("href");
// Some times this returns a empty string
// This for stops that
- if (nextPage == "") {
+ if (nextPage.equals("")) {
return null;
}
else {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinnercomicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinnercomicsRipper.java
index 326de1a1..166bce88 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinnercomicsRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SinnercomicsRipper.java
@@ -34,7 +34,7 @@ public class SinnercomicsRipper extends AbstractHTMLRipper {
Pattern p = Pattern.compile("^https?://sinnercomics.com/comic/([a-zA-Z0-9-]*)/?$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
- return m.group(1);
+ return m.group(1).replaceAll("-page-\\d+", "");
}
throw new MalformedURLException("Expected sinnercomics.com URL format: " +
"sinnercomics.com/comic/albumName - got " + url + " instead");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java
index 51992ec4..b61f2fef 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java
@@ -3,24 +3,19 @@ package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
-import com.rarchives.ripme.ripper.AlbumRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
-/**
- * Appears to be broken as of 2015-02-11.
- * Generating large image from thumbnail requires replacing "/m/" with something else:
- * -> Sometimes "/b/"
- * -> Sometimes "/p/"
- * No way to know without loading the image page.
- */
-public class SmuttyRipper extends AlbumRipper {
+
+public class SmuttyRipper extends AbstractHTMLRipper {
private static final String DOMAIN = "smutty.com",
HOST = "smutty";
@@ -29,6 +24,16 @@ public class SmuttyRipper extends AlbumRipper {
super(url);
}
+ @Override
+ public String getHost() {
+ return "smutty";
+ }
+
+ @Override
+ public String getDomain() {
+ return "smutty.com";
+ }
+
@Override
public boolean canRip(URL url) {
return (url.getHost().endsWith(DOMAIN));
@@ -40,69 +45,57 @@ public class SmuttyRipper extends AlbumRipper {
}
@Override
- public void rip() throws IOException {
- int page = 0;
- String url, tag = getGID(this.url);
- boolean hasNextPage = true;
- while (hasNextPage) {
+ public List getURLsFromPage(Document doc) {
+ List results = new ArrayList<>();
+ for (Element image : doc.select("a.l > img")) {
if (isStopped()) {
break;
}
- page++;
- url = "http://smutty.com/h/" + tag + "/?q=%23" + tag + "&page=" + page + "&sort=date&lazy=1";
- this.sendUpdate(STATUS.LOADING_RESOURCE, url);
- logger.info(" Retrieving " + url);
- Document doc;
- try {
- doc = Http.url(url)
- .ignoreContentType()
- .get();
- } catch (IOException e) {
- if (e.toString().contains("Status=404")) {
- logger.info("No more pages to load");
- } else {
- logger.warn("Exception while loading " + url, e);
- }
- break;
- }
- for (Element image : doc.select("a.l > img")) {
- if (isStopped()) {
- break;
- }
- String imageUrl = image.attr("src");
+ String imageUrl = image.attr("src");
- // Construct direct link to image based on thumbnail
- StringBuilder sb = new StringBuilder();
- String[] fields = imageUrl.split("/");
- for (int i = 0; i < fields.length; i++) {
- if (i == fields.length - 2 && fields[i].equals("m")) {
- fields[i] = "b";
- }
- sb.append(fields[i]);
- if (i < fields.length - 1) {
- sb.append("/");
- }
+ // Construct direct link to image based on thumbnail
+ StringBuilder sb = new StringBuilder();
+ String[] fields = imageUrl.split("/");
+ for (int i = 0; i < fields.length; i++) {
+ if (i == fields.length - 2 && fields[i].equals("m")) {
+ fields[i] = "b";
+ }
+ sb.append(fields[i]);
+ if (i < fields.length - 1) {
+ sb.append("/");
}
- imageUrl = sb.toString();
- addURLToDownload(new URL("http:" + imageUrl));
- }
- if (doc.select("#next").size() == 0) {
- break; // No more pages
- }
- // Wait before loading next page
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- logger.error("[!] Interrupted while waiting to load next album:", e);
- break;
}
+ imageUrl = sb.toString();
+ results.add("http:" + imageUrl);
}
- waitForThreads();
+ return results;
}
@Override
- public String getHost() {
- return HOST;
+ public Document getNextPage(Document doc) throws IOException {
+ Element elem = doc.select("a.next").first();
+ if (elem == null) {
+ throw new IOException("No more pages");
+ }
+ String nextPage = elem.attr("href");
+        // Sometimes this returns an empty string
+        // This check stops that
+ if (nextPage.equals("")) {
+ throw new IOException("No more pages");
+ }
+ else {
+ return Http.url("https://smutty.com" + nextPage).get();
+ }
+ }
+
+ @Override
+ public Document getFirstPage() throws IOException {
+ // "url" is an instance field of the superclass
+ return Http.url(url).get();
+ }
+
+ public void downloadURL(URL url, int index) {
+ addURLToDownload(url, getPrefix(index));
}
@Override
@@ -117,6 +110,12 @@ public class SmuttyRipper extends AlbumRipper {
if (m.matches()) {
return m.group(1).replace("%23", "");
}
+
+ p = Pattern.compile("^https?://smutty.com/user/([a-zA-Z0-9\\-_]+)/?$");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
throw new MalformedURLException("Expected tag in URL (smutty.com/h/tag and not " + url);
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
index 9ca91e45..70c023d3 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java
@@ -17,7 +17,6 @@ import org.json.JSONObject;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -35,13 +34,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
try {
// This sessionId will expire and need to be replaced
cookies.put("ASP.NET_SessionId","c4rbzccf0dvy3e0cloolmlkq");
- logger.info(cookies);
Document doc = Jsoup.connect(postURL).data("q", getAlbumID()).userAgent(USER_AGENT).cookies(cookies).referrer("http://www.tsumino.com/Read/View/" + getAlbumID()).post();
String jsonInfo = doc.html().replaceAll("","").replaceAll("", "").replaceAll("", "").replaceAll("", "")
.replaceAll("", "").replaceAll("\n", "");
- logger.info(jsonInfo);
JSONObject json = new JSONObject(jsonInfo);
- logger.info(json.getJSONArray("reader_page_urls"));
return json.getJSONArray("reader_page_urls");
} catch (IOException e) {
logger.info(e);
@@ -63,11 +59,16 @@ public class TsuminoRipper extends AbstractHTMLRipper {
@Override
public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/([a-zA-Z0-9_-]*)");
+ Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/([a-zA-Z0-9_-]*)/?");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1) + "_" + m.group(2);
}
+ p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/?");
+ m = p.matcher(url.toExternalForm());
+ if (m.matches()) {
+ return m.group(1);
+ }
throw new MalformedURLException("Expected tsumino URL format: " +
"tsumino.com/Book/Info/ID/TITLE - got " + url + " instead");
}
@@ -85,7 +86,6 @@ public class TsuminoRipper extends AbstractHTMLRipper {
public Document getFirstPage() throws IOException {
Connection.Response resp = Http.url(url).response();
cookies.putAll(resp.cookies());
- logger.info(resp.parse());
return resp.parse();
}
@@ -103,6 +103,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
@Override
public void downloadURL(URL url, int index) {
sleep(1000);
- addURLToDownload(url, getPrefix(index));
+ /*
+    There is no way to tell if an image returned from tsumino.com is a png or a jpg. The content-type header is always
+ "image/jpeg" even when the image is a png. The file ext is not included in the url.
+ */
+ addURLToDownload(url, getPrefix(index), "", null, null, null, null, true);
}
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
index dc57c48f..89884854 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java
@@ -44,7 +44,7 @@ public class TumblrRipper extends AlbumRipper {
private static final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS
/**
- * Gets the API key.
+ * Gets the API key.
* Chooses between default/included keys & user specified ones (from the config file).
* @return Tumblr API key
*/
@@ -57,7 +57,7 @@ public class TumblrRipper extends AlbumRipper {
logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
return userDefinedAPIKey;
}
-
+
}
public TumblrRipper(URL url) throws IOException {
@@ -71,12 +71,12 @@ public class TumblrRipper extends AlbumRipper {
public boolean canRip(URL url) {
return url.getHost().endsWith(DOMAIN);
}
-
+
/**
* Sanitizes URL.
* @param url URL to be sanitized.
* @return Sanitized URL
- * @throws MalformedURLException
+ * @throws MalformedURLException
*/
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
@@ -230,7 +230,7 @@ public class TumblrRipper extends AlbumRipper {
urlString = urlString.replaceAll("_\\d+\\.", "_raw.");
fileURL = new URL(urlString);
} else {
- fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http", "https"));
+ fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http:", "https:"));
}
m = p.matcher(fileURL.toString());
if (m.matches()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
index ab34620c..abdb0320 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java
@@ -4,14 +4,12 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
-import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VineRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VineRipper.java
deleted file mode 100644
index 1ba53926..00000000
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/VineRipper.java
+++ /dev/null
@@ -1,95 +0,0 @@
-package com.rarchives.ripme.ripper.rippers;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.json.JSONArray;
-import org.json.JSONObject;
-import org.jsoup.HttpStatusException;
-
-import com.rarchives.ripme.ripper.AlbumRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
-import com.rarchives.ripme.utils.Http;
-
-public class VineRipper extends AlbumRipper {
-
- private static final String DOMAIN = "vine.co",
- HOST = "vine";
-
- public VineRipper(URL url) throws IOException {
- super(url);
- }
-
- @Override
- public boolean canRip(URL url) {
- return url.getHost().endsWith(DOMAIN);
- }
-
- @Override
- public URL sanitizeURL(URL url) throws MalformedURLException {
- return new URL("http://vine.co/u/" + getGID(url));
- }
-
- @Override
- public void rip() throws IOException {
- int page = 0;
- String baseURL = "https://vine.co/api/timelines/users/" + getGID(this.url);
- JSONObject json = null;
- while (true) {
- page++;
- String theURL = baseURL;
- if (page > 1) {
- theURL += "?page=" + page;
- }
- try {
- logger.info(" Retrieving " + theURL);
- sendUpdate(STATUS.LOADING_RESOURCE, theURL);
- json = Http.url(theURL).getJSON();
- } catch (HttpStatusException e) {
- logger.debug("Hit end of pages at page " + page, e);
- break;
- }
- JSONArray records = json.getJSONObject("data").getJSONArray("records");
- for (int i = 0; i < records.length(); i++) {
- String videoURL = records.getJSONObject(i).getString("videoUrl");
- addURLToDownload(new URL(videoURL));
- if (isThisATest()) {
- break;
- }
- }
- if (isThisATest()) {
- break;
- }
- if (records.length() == 0) {
- logger.info("Zero records returned");
- break;
- }
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- logger.error("[!] Interrupted while waiting to load next page", e);
- break;
- }
- }
- waitForThreads();
- }
-
- @Override
- public String getHost() {
- return HOST;
- }
-
- @Override
- public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("^https?://(www\\.)?vine\\.co/u/([0-9]+).*$");
- Matcher m = p.matcher(url.toExternalForm());
- if (!m.matches()) {
- throw new MalformedURLException("Expected format: http://vine.co/u/######");
- }
- return m.group(m.groupCount());
- }
-
-}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java
index df373689..de785a44 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java
@@ -36,7 +36,7 @@ public class WebtoonsRipper extends AbstractHTMLRipper {
@Override
public boolean canRip(URL url) {
- Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z]+/[a-zA-Z]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
+ Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
Matcher mat = pat.matcher(url.toExternalForm());
if (mat.matches()) {
return true;
@@ -47,7 +47,7 @@ public class WebtoonsRipper extends AbstractHTMLRipper {
@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
- Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z]+/[a-zA-Z]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
+ Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
Matcher mat = pat.matcher(url.toExternalForm());
if (mat.matches()) {
return getHost() + "_" + mat.group(1);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java
index dbc44585..31509734 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java
@@ -44,7 +44,20 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
"freeadultcomix.com",
"thisis.delvecomic.com",
"tnbtu.com",
- "shipinbottle.pepsaga.com"
+ "shipinbottle.pepsaga.com",
+ "8muses.download",
+ "spyingwithlana.com"
+ );
+
+ private static List theme1 = Arrays.asList(
+ "www.totempole666.com",
+ "buttsmithy.com",
+ "themonsterunderthebed.net",
+ "prismblush.com",
+ "www.konradokonski.com",
+ "thisis.delvecomic.com",
+ "tnbtu.com",
+ "spyingwithlana.com"
);
@Override
@@ -135,12 +148,79 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
if (shipinbottleMat.matches()) {
return true;
}
+
+ Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
+ Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
+ if (eight_musesMat.matches()) {
+ return true;
+ }
+
+ Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
+ Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
+ if (spyingwithlanaMat.matches()) {
+ return true;
+ }
+
+ Pattern pa = Pattern.compile("^https?://8muses.download/\\?s=([a-zA-Z0-9-]*)");
+ Matcher ma = pa.matcher(url.toExternalForm());
+ if (ma.matches()) {
+ return true;
+ }
+
+ Pattern pat = Pattern.compile("https?://8muses.download/page/\\d+/\\?s=([a-zA-Z0-9-]*)");
+ Matcher mat = pat.matcher(url.toExternalForm());
+ if (mat.matches()) {
+ return true;
+ }
+
+ pat = Pattern.compile("https://8muses.download/category/([a-zA-Z0-9-]*)/?");
+ mat = pat.matcher(url.toExternalForm());
+ if (mat.matches()) {
+ return true;
+ }
}
return false;
}
+ @Override
+ public boolean hasQueueSupport() {
+ return true;
+ }
+
+ @Override
+ public boolean pageContainsAlbums(URL url) {
+ Pattern pa = Pattern.compile("^https?://8muses.download/\\?s=([a-zA-Z0-9-]*)");
+ Matcher ma = pa.matcher(url.toExternalForm());
+ if (ma.matches()) {
+ return true;
+ }
+
+ Pattern pat = Pattern.compile("https?://8muses.download/page/\\d+/\\?s=([a-zA-Z0-9-]*)");
+ Matcher mat = pat.matcher(url.toExternalForm());
+ if (mat.matches()) {
+ return true;
+ }
+
+ pat = Pattern.compile("https://8muses.download/category/([a-zA-Z0-9-]*)/?");
+ mat = pat.matcher(url.toExternalForm());
+ if (mat.matches()) {
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public List getAlbumsToQueue(Document doc) {
+ List urlsToAddToQueue = new ArrayList<>();
+ for (Element elem : doc.select("#post_masonry > article > div > figure > a")) {
+ urlsToAddToQueue.add(elem.attr("href"));
+ }
+ return urlsToAddToQueue;
+ }
+
@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
Pattern totempole666Pat = Pattern.compile("(?:https?://)?(?:www\\.)?totempole666.com/comic/([a-zA-Z0-9_-]*)/?$");
@@ -209,6 +289,18 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
return getHost() + "_" + "Ship_in_bottle";
}
+ Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
+ Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
+ if (eight_musesMat.matches()) {
+ return getHost() + "_" + eight_musesMat.group(1);
+ }
+
+ Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
+ Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
+ if (spyingwithlanaMat.matches()) {
+ return "spyingwithlana_" + spyingwithlanaMat.group(1).replaceAll("-page-\\d", "");
+ }
+
return super.getAlbumTitle(url);
}
@@ -227,13 +319,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
// Find next page
String nextPage = "";
Element elem = null;
- if (getHost().contains("www.totempole666.com")
- || getHost().contains("buttsmithy.com")
- || getHost().contains("themonsterunderthebed.net")
- || getHost().contains("prismblush.com")
- || getHost().contains("www.konradokonski.com")
- || getHost().contains("thisis.delvecomic.com")
- || getHost().contains("tnbtu.com")) {
+ if (theme1.contains(getHost())) {
elem = doc.select("a.comic-nav-next").first();
if (elem == null) {
throw new IOException("No more pages");
@@ -247,7 +333,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
nextPage = elem.attr("href");
}
- if (nextPage == "") {
+ if (nextPage.equals("")) {
throw new IOException("No more pages");
} else {
return Http.url(nextPage).get();
@@ -257,13 +343,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
@Override
public List getURLsFromPage(Document doc) {
List result = new ArrayList<>();
- if (getHost().contains("www.totempole666.com")
- || getHost().contains("buttsmithy.com")
- || getHost().contains("themonsterunderthebed.net")
- || getHost().contains("prismblush.com")
- || getHost().contains("www.konradokonski.com")
- || getHost().contains("thisis.delvecomic.com")
- || getHost().contains("tnbtu.com")) {
+ if (theme1.contains(getHost())) {
Element elem = doc.select("div.comic-table > div#comic > a > img").first();
// If doc is the last page in the comic then elem.attr("src") returns null
// because there is no link to the next page
@@ -315,6 +395,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
}
}
+ if (url.toExternalForm().contains("8muses.download")) {
+ for (Element elem : doc.select("div.popup-gallery > figure > a")) {
+ result.add(elem.attr("href"));
+ }
+ }
+
return result;
}
@@ -327,8 +413,14 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|| getHost().contains("themonsterunderthebed.net")) {
addURLToDownload(url, pageTitle + "_");
}
- // If we're ripping a site where we can't get the page number/title we just rip normally
- addURLToDownload(url, getPrefix(index));
+ if (getHost().contains("tnbtu.com")) {
+ // We need to set the referrer header for tnbtu
+ addURLToDownload(url, getPrefix(index), "","http://www.tnbtu.com/comic", null);
+ } else {
+ // If we're ripping a site where we can't get the page number/title we just rip normally
+ addURLToDownload(url, getPrefix(index));
+ }
+
}
@Override
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
index 35fe56ff..b92aa9dd 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java
@@ -72,7 +72,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
- if (doc.select("a.next").first().attr("href") != "") {
+ if (!doc.select("a.next").first().attr("href").equals("")) {
return Http.url(doc.select("a.next").first().attr("href")).get();
} else {
throw new IOException("No more pages");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
index 4f2bac97..6dde798d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
@@ -3,7 +3,6 @@ package com.rarchives.ripme.ripper.rippers.video;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
-import java.net.URLDecoder;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
diff --git a/src/main/java/com/rarchives/ripme/ui/MainWindow.java b/src/main/java/com/rarchives/ripme/ui/MainWindow.java
index 016d25f3..59f75e06 100644
--- a/src/main/java/com/rarchives/ripme/ui/MainWindow.java
+++ b/src/main/java/com/rarchives/ripme/ui/MainWindow.java
@@ -16,9 +16,7 @@ import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.util.Collections;
-import java.util.Date;
-import java.util.Enumeration;
+import java.util.*;
import java.util.List;
import javax.imageio.ImageIO;
@@ -138,6 +136,17 @@ public final class MainWindow implements Runnable, RipStatusHandler {
private static AbstractRipper ripper;
+ private ResourceBundle rb = Utils.getResourceBundle();
+
+ private void updateQueueLabel() {
+ if (queueListModel.size() > 0) {
+ optionQueue.setText( rb.getString("Queue") + " (" + queueListModel.size() + ")");
+ } else {
+ optionQueue.setText(rb.getString("Queue"));
+ }
+ }
+
+
private static void addCheckboxListener(JCheckBox checkBox, String configString) {
checkBox.addActionListener(arg0 -> {
Utils.setConfigBoolean(configString, checkBox.isSelected());
@@ -153,6 +162,11 @@ public final class MainWindow implements Runnable, RipStatusHandler {
return checkbox;
}
+
+ public static void addUrlToQueue(String url) {
+ queueListModel.addElement(url);
+ }
+
public MainWindow() {
mainFrame = new JFrame("RipMe v" + UpdateUtils.getThisJarVersion());
mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
@@ -289,7 +303,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
gbc.gridx = 3; ripPanel.add(stopButton, gbc);
gbc.weightx = 1;
- statusLabel = new JLabel("Inactive");
+ statusLabel = new JLabel(rb.getString("inactive"));
statusLabel.setHorizontalAlignment(JLabel.CENTER);
openButton = new JButton();
openButton.setVisible(false);
@@ -307,10 +321,10 @@ public final class MainWindow implements Runnable, RipStatusHandler {
JPanel optionsPanel = new JPanel(new GridBagLayout());
optionsPanel.setBorder(emptyBorder);
- optionLog = new JButton("Log");
- optionHistory = new JButton("History");
- optionQueue = new JButton("Queue");
- optionConfiguration = new JButton("Configuration");
+ optionLog = new JButton(rb.getString("Log"));
+ optionHistory = new JButton(rb.getString("History"));
+ optionQueue = new JButton(rb.getString("Queue"));
+ optionConfiguration = new JButton(rb.getString("Configuration"));
optionLog.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
optionHistory.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
optionQueue.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
@@ -402,9 +416,9 @@ public final class MainWindow implements Runnable, RipStatusHandler {
historyTable.getColumnModel().getColumn(i).setPreferredWidth(width);
}
JScrollPane historyTableScrollPane = new JScrollPane(historyTable);
- historyButtonRemove = new JButton("Remove");
- historyButtonClear = new JButton("Clear");
- historyButtonRerip = new JButton("Re-rip Checked");
+ historyButtonRemove = new JButton(rb.getString("remove"));
+ historyButtonClear = new JButton(rb.getString("clear"));
+ historyButtonRerip = new JButton(rb.getString("re-rip.checked"));
gbc.gridx = 0;
// History List Panel
JPanel historyTablePanel = new JPanel(new GridBagLayout());
@@ -440,11 +454,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
for (String item : Utils.getConfigList("queue")) {
queueListModel.addElement(item);
}
- if (queueListModel.size() > 0) {
- optionQueue.setText("Queue (" + queueListModel.size() + ")");
- } else {
- optionQueue.setText("Queue");
- }
+ updateQueueLabel();
gbc.gridx = 0;
JPanel queueListPanel = new JPanel(new GridBagLayout());
gbc.fill = GridBagConstraints.BOTH;
@@ -459,27 +469,27 @@ public final class MainWindow implements Runnable, RipStatusHandler {
configurationPanel.setBorder(emptyBorder);
configurationPanel.setVisible(false);
// TODO Configuration components
- configUpdateButton = new JButton("Check for updates");
- configUpdateLabel = new JLabel("Current version: " + UpdateUtils.getThisJarVersion(), JLabel.RIGHT);
- JLabel configThreadsLabel = new JLabel("Maximum download threads:", JLabel.RIGHT);
- JLabel configTimeoutLabel = new JLabel("Timeout (in milliseconds):", JLabel.RIGHT);
- JLabel configRetriesLabel = new JLabel("Retry download count:", JLabel.RIGHT);
+ configUpdateButton = new JButton(rb.getString("check.for.updates"));
+ configUpdateLabel = new JLabel( rb.getString("current.version") + ": " + UpdateUtils.getThisJarVersion(), JLabel.RIGHT);
+ JLabel configThreadsLabel = new JLabel(rb.getString("max.download.threads") + ":", JLabel.RIGHT);
+ JLabel configTimeoutLabel = new JLabel(rb.getString("timeout.mill"), JLabel.RIGHT);
+ JLabel configRetriesLabel = new JLabel(rb.getString("retry.download.count"), JLabel.RIGHT);
configThreadsText = new JTextField(Integer.toString(Utils.getConfigInteger("threads.size", 3)));
configTimeoutText = new JTextField(Integer.toString(Utils.getConfigInteger("download.timeout", 60000)));
configRetriesText = new JTextField(Integer.toString(Utils.getConfigInteger("download.retries", 3)));
- configOverwriteCheckbox = addNewCheckbox("Overwrite existing files?", "file.overwrite", false);
- configAutoupdateCheckbox = addNewCheckbox("Auto-update?", "auto.update", true);
- configPlaySound = addNewCheckbox("Sound when rip completes", "play.sound", false);
- configShowPopup = addNewCheckbox("Notification when rip starts", "download.show_popup", false);
- configSaveOrderCheckbox = addNewCheckbox("Preserve order", "download.save_order", true);
- configSaveLogs = addNewCheckbox("Save logs", "log.save", false);
- configSaveURLsOnly = addNewCheckbox("Save URLs only", "urls_only.save", false);
- configSaveAlbumTitles = addNewCheckbox("Save album titles", "album_titles.save", true);
- configClipboardAutorip = addNewCheckbox("Autorip from Clipboard", "clipboard.autorip", false);
- configSaveDescriptions = addNewCheckbox("Save descriptions", "descriptions.save", true);
- configPreferMp4 = addNewCheckbox("Prefer MP4 over GIF","prefer.mp4", false);
- configWindowPosition = addNewCheckbox("Restore window position", "window.position", true);
- configURLHistoryCheckbox = addNewCheckbox("Remember URL history", "remember.url_history", true);
+ configOverwriteCheckbox = addNewCheckbox(rb.getString("overwrite.existing.files"), "file.overwrite", false);
+ configAutoupdateCheckbox = addNewCheckbox(rb.getString("auto.update"), "auto.update", true);
+ configPlaySound = addNewCheckbox(rb.getString("sound.when.rip.completes"), "play.sound", false);
+ configShowPopup = addNewCheckbox(rb.getString("notification.when.rip.starts"), "download.show_popup", false);
+ configSaveOrderCheckbox = addNewCheckbox(rb.getString("preserve.order"), "download.save_order", true);
+ configSaveLogs = addNewCheckbox(rb.getString("save.logs"), "log.save", false);
+ configSaveURLsOnly = addNewCheckbox(rb.getString("save.urls.only"), "urls_only.save", false);
+ configSaveAlbumTitles = addNewCheckbox(rb.getString("save.album.titles"), "album_titles.save", true);
+ configClipboardAutorip = addNewCheckbox(rb.getString("autorip.from.clipboard"), "clipboard.autorip", false);
+ configSaveDescriptions = addNewCheckbox(rb.getString("save.descriptions"), "descriptions.save", true);
+ configPreferMp4 = addNewCheckbox(rb.getString("prefer.mp4.over.gif"),"prefer.mp4", false);
+ configWindowPosition = addNewCheckbox(rb.getString("restore.window.position"), "window.position", true);
+ configURLHistoryCheckbox = addNewCheckbox(rb.getString("remember.url.history"), "remember.url_history", true);
configLogLevelCombobox = new JComboBox(new String[] {"Log level: Error", "Log level: Warn", "Log level: Info", "Log level: Debug"});
configLogLevelCombobox.setSelectedItem(Utils.getConfigString("log.level", "Log level: Debug"));
@@ -785,11 +795,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
queueListModel.addListDataListener(new ListDataListener() {
@Override
public void intervalAdded(ListDataEvent arg0) {
- if (queueListModel.size() > 0) {
- optionQueue.setText("Queue (" + queueListModel.size() + ")");
- } else {
- optionQueue.setText("Queue");
- }
+ updateQueueLabel();
if (!isRipping) {
ripNextAlbum();
}
@@ -966,7 +972,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
HISTORY.clear();
if (historyFile.exists()) {
try {
- logger.info("Loading history from " + historyFile.getCanonicalPath());
+ logger.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
HISTORY.fromFile(historyFile.getCanonicalPath());
} catch (IOException e) {
logger.error("Failed to load history from file " + historyFile, e);
@@ -979,7 +985,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
JOptionPane.ERROR_MESSAGE);
}
} else {
- logger.info("Loading history from configuration");
+ logger.info(rb.getString("loading.history.from.configuration"));
HISTORY.fromList(Utils.getConfigList("download.history"));
if (HISTORY.toList().size() == 0) {
// Loaded from config, still no entries.
@@ -1025,17 +1031,13 @@ public final class MainWindow implements Runnable, RipStatusHandler {
return;
}
String nextAlbum = (String) queueListModel.remove(0);
- if (queueListModel.isEmpty()) {
- optionQueue.setText("Queue");
- } else {
- optionQueue.setText("Queue (" + queueListModel.size() + ")");
- }
+ updateQueueLabel();
Thread t = ripAlbum(nextAlbum);
if (t == null) {
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
- logger.error("Interrupted while waiting to rip next album", ie);
+ logger.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
}
ripNextAlbum();
} else {
diff --git a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java
index 80282ccb..59f21f09 100644
--- a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java
+++ b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java
@@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Utils;
public class UpdateUtils {
private static final Logger logger = Logger.getLogger(UpdateUtils.class);
- private static final String DEFAULT_VERSION = "1.7.27";
+ private static final String DEFAULT_VERSION = "1.7.47";
private static final String REPO_NAME = "ripmeapp/ripme";
private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
private static final String mainFileName = "ripme.jar";
diff --git a/src/main/java/com/rarchives/ripme/utils/Proxy.java b/src/main/java/com/rarchives/ripme/utils/Proxy.java
new file mode 100644
index 00000000..be3c3b7e
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/utils/Proxy.java
@@ -0,0 +1,99 @@
+package com.rarchives.ripme.utils;
+
+import java.net.Authenticator;
+import java.net.PasswordAuthentication;
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Proxy/Socks setter
+ */
+public class Proxy {
+ private Proxy() {
+ }
+
+ /**
+ * Parse the proxy server settings from string, using the format
+ * [user:password]@host[:port].
+ *
+ * @param fullproxy the string to parse
+ * @return HashMap containing proxy server, port, user and password
+ */
+ private static Map<String, String> parseServer(String fullproxy) {
+ Map<String, String> proxy = new HashMap<String, String>();
+
+ if (fullproxy.lastIndexOf("@") != -1) {
+ int sservli = fullproxy.lastIndexOf("@");
+ String userpw = fullproxy.substring(0, sservli);
+ String[] usersplit = userpw.split(":");
+ proxy.put("user", usersplit[0]);
+ proxy.put("password", usersplit[1]);
+ fullproxy = fullproxy.substring(sservli + 1);
+ }
+ String[] servsplit = fullproxy.split(":");
+ if (servsplit.length == 2) {
+ proxy.put("port", servsplit[1]);
+ }
+ proxy.put("server", servsplit[0]);
+ return proxy;
+ }
+
+ /**
+ * Set a HTTP Proxy.
+ * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless
+ * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java
+ * see https://stackoverflow.com/q/41505219
+ *
+ * @param fullproxy the proxy, using format [user:password]@host[:port]
+ */
+ public static void setHTTPProxy(String fullproxy) {
+ Map<String, String> proxyServer = parseServer(fullproxy);
+
+ if (proxyServer.get("user") != null && proxyServer.get("password") != null) {
+ Authenticator.setDefault(new Authenticator(){
+ protected PasswordAuthentication getPasswordAuthentication(){
+ PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray());
+ return p;
+ }
+ });
+ System.setProperty("http.proxyUser", proxyServer.get("user"));
+ System.setProperty("http.proxyPassword", proxyServer.get("password"));
+ System.setProperty("https.proxyUser", proxyServer.get("user"));
+ System.setProperty("https.proxyPassword", proxyServer.get("password"));
+ }
+
+ if (proxyServer.get("port") != null) {
+ System.setProperty("http.proxyPort", proxyServer.get("port"));
+ System.setProperty("https.proxyPort", proxyServer.get("port"));
+ }
+
+ System.setProperty("http.proxyHost", proxyServer.get("server"));
+ System.setProperty("https.proxyHost", proxyServer.get("server"));
+ }
+
+ /**
+ * Set a Socks Proxy Server (globally).
+ *
+ * @param fullsocks the socks server, using format [user:password]@host[:port]
+ */
+ public static void setSocks(String fullsocks) {
+
+ Map<String, String> socksServer = parseServer(fullsocks);
+ if (socksServer.get("user") != null && socksServer.get("password") != null) {
+ Authenticator.setDefault(new Authenticator(){
+ protected PasswordAuthentication getPasswordAuthentication(){
+ PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray());
+ return p;
+ }
+ });
+ System.setProperty("java.net.socks.username", socksServer.get("user"));
+ System.setProperty("java.net.socks.password", socksServer.get("password"));
+ }
+ if (socksServer.get("port") != null) {
+ System.setProperty("socksProxyPort", socksServer.get("port"));
+ }
+
+ System.setProperty("socksProxyHost", socksServer.get("server"));
+ }
+
+}
diff --git a/src/main/java/com/rarchives/ripme/utils/RipUtils.java b/src/main/java/com/rarchives/ripme/utils/RipUtils.java
index b7b8c239..01d20e7c 100644
--- a/src/main/java/com/rarchives/ripme/utils/RipUtils.java
+++ b/src/main/java/com/rarchives/ripme/utils/RipUtils.java
@@ -9,19 +9,18 @@ import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import com.rarchives.ripme.ripper.AbstractRipper;
+import com.rarchives.ripme.ripper.rippers.EroShareRipper;
+import com.rarchives.ripme.ripper.rippers.EromeRipper;
+import com.rarchives.ripme.ripper.rippers.ImgurRipper;
+import com.rarchives.ripme.ripper.rippers.VidbleRipper;
+import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
-import com.rarchives.ripme.ripper.AbstractRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurImage;
-import com.rarchives.ripme.ripper.rippers.VidbleRipper;
-import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
-import com.rarchives.ripme.ripper.rippers.EroShareRipper;
public class RipUtils {
private static final Logger logger = Logger.getLogger(RipUtils.class);
@@ -35,8 +34,8 @@ public class RipUtils {
&& url.toExternalForm().contains("imgur.com/a/")) {
try {
logger.debug("Fetching imgur album at " + url);
- ImgurAlbum imgurAlbum = ImgurRipper.getImgurAlbum(url);
- for (ImgurImage imgurImage : imgurAlbum.images) {
+ ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurAlbum(url);
+ for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) {
logger.debug("Got imgur image: " + imgurImage.url);
result.add(imgurImage.url);
}
@@ -49,8 +48,8 @@ public class RipUtils {
// Imgur image series.
try {
logger.debug("Fetching imgur series at " + url);
- ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url);
- for (ImgurImage imgurImage : imgurAlbum.images) {
+ ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url);
+ for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) {
logger.debug("Got imgur image: " + imgurImage.url);
result.add(imgurImage.url);
}
@@ -91,6 +90,21 @@ public class RipUtils {
return result;
}
+ else if (url.toExternalForm().contains("erome.com")) {
+ try {
+ logger.info("Getting eroshare album " + url);
+ EromeRipper r = new EromeRipper(url);
+ Document tempDoc = r.getFirstPage();
+ for (String u : r.getURLsFromPage(tempDoc)) {
+ result.add(new URL(u));
+ }
+ } catch (IOException e) {
+ // Non-fatal: log the failure and return whatever URLs were gathered
+ logger.warn("Exception while retrieving eroshare page:", e);
+ }
+ return result;
+ }
+
Pattern p = Pattern.compile("https?://i.reddituploads.com/([a-zA-Z0-9]+)\\?.*");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
@@ -122,8 +136,8 @@ public class RipUtils {
try {
// Fetch the page
Document doc = Jsoup.connect(url.toExternalForm())
- .userAgent(AbstractRipper.USER_AGENT)
- .get();
+ .userAgent(AbstractRipper.USER_AGENT)
+ .get();
for (Element el : doc.select("meta")) {
if (el.attr("name").equals("twitter:image:src")) {
result.add(new URL(el.attr("content")));
diff --git a/src/main/java/com/rarchives/ripme/utils/UTF8Control.java b/src/main/java/com/rarchives/ripme/utils/UTF8Control.java
new file mode 100644
index 00000000..6cd81bbb
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/utils/UTF8Control.java
@@ -0,0 +1,46 @@
+package com.rarchives.ripme.utils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Locale;
+import java.util.PropertyResourceBundle;
+import java.util.ResourceBundle;
+
+// Code taken from https://stackoverflow.com/questions/4659929/how-to-use-utf-8-in-resource-properties-with-resourcebundle/4660195#4660195
+
+public class UTF8Control extends ResourceBundle.Control {
+ public ResourceBundle newBundle
+ (String baseName, Locale locale, String format, ClassLoader loader, boolean reload)
+ throws IllegalAccessException, InstantiationException, IOException
+ {
+ // The below is a copy of the default implementation.
+ String bundleName = toBundleName(baseName, locale);
+ String resourceName = toResourceName(bundleName, "properties");
+ ResourceBundle bundle = null;
+ InputStream stream = null;
+ if (reload) {
+ URL url = loader.getResource(resourceName);
+ if (url != null) {
+ URLConnection connection = url.openConnection();
+ if (connection != null) {
+ connection.setUseCaches(false);
+ stream = connection.getInputStream();
+ }
+ }
+ } else {
+ stream = loader.getResourceAsStream(resourceName);
+ }
+ if (stream != null) {
+ try {
+ // Only this line is changed to make it read properties files as UTF-8.
+ bundle = new PropertyResourceBundle(new InputStreamReader(stream, "UTF-8"));
+ } finally {
+ stream.close();
+ }
+ }
+ return bundle;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/com/rarchives/ripme/utils/Utils.java b/src/main/java/com/rarchives/ripme/utils/Utils.java
index 9517c528..828d552a 100644
--- a/src/main/java/com/rarchives/ripme/utils/Utils.java
+++ b/src/main/java/com/rarchives/ripme/utils/Utils.java
@@ -1,9 +1,6 @@
package com.rarchives.ripme.utils;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
+import java.io.*;
import java.lang.reflect.Constructor;
import java.net.URISyntaxException;
import java.net.URL;
@@ -92,9 +89,25 @@ public class Utils {
return workingDir;
}
+ /**
+ * Gets the value of a specific config key.
+ *
+ * @param key The name of the config parameter you want to find.
+ * @param defaultValue What the default value would be.
+ */
public static String getConfigString(String key, String defaultValue) {
return config.getString(key, defaultValue);
}
+
+ public static String[] getConfigStringArray(String key) {
+ String[] s = config.getStringArray(key);
+ if (s.length == 0) {
+ return null;
+ } else {
+ return s;
+ }
+ }
+
public static int getConfigInteger(String key, int defaultValue) {
return config.getInt(key, defaultValue);
}
@@ -135,31 +148,53 @@ public class Utils {
}
}
+ /**
+ * Determines if your current system is a Windows system.
+ */
private static boolean isWindows() {
return OS.contains("win");
}
+ /**
+ * Determines if your current system is a Mac system
+ */
private static boolean isMacOS() {
return OS.contains("mac");
}
+ /**
+ * Determines if current system is based on UNIX
+ */
private static boolean isUnix() {
return OS.contains("nix") || OS.contains("nux") || OS.contains("bsd");
}
+ /**
+ * Gets the directory of where the config file is stored on a Windows machine.
+ */
private static String getWindowsConfigDir() {
return System.getenv("LOCALAPPDATA") + File.separator + "ripme";
}
+
+ /**
+ * Gets the directory of where the config file is stored on a UNIX machine.
+ */
private static String getUnixConfigDir() {
return System.getProperty("user.home") + File.separator + ".config" + File.separator + "ripme";
}
-
+
+ /**
+ * Gets the directory of where the config file is stored on a Mac machine.
+ */
private static String getMacOSConfigDir() {
return System.getProperty("user.home")
+ File.separator + "Library" + File.separator + "Application Support" + File.separator + "ripme";
}
+ /**
+ * Determines if the app is running in a portable mode. i.e. on a USB stick
+ */
private static boolean portableMode() {
try {
File f = new File(new File(".").getCanonicalPath() + File.separator + configFile);
@@ -172,7 +207,9 @@ public class Utils {
return false;
}
-
+ /**
+ * Gets the directory of the config directory, for all systems.
+ */
public static String getConfigDir() {
if (portableMode()) {
try {
@@ -192,17 +229,24 @@ public class Utils {
return ".";
}
}
- // Delete the url history file
+ /**
+ * Delete the url history file
+ */
public static void clearURLHistory() {
File file = new File(getURLHistoryFile());
file.delete();
}
- // Return the path of the url history file
+ /**
+ * Return the path of the url history file
+ */
public static String getURLHistoryFile() {
return getConfigDir() + File.separator + "url_history.txt";
}
+ /**
+ * Gets the path to the configuration file.
+ */
private static String getConfigFilePath() {
return getConfigDir() + File.separator + configFile;
}
@@ -228,6 +272,15 @@ public class Utils {
return prettySaveAs;
}
+ /**
+ * Strips away URL parameters, which usually appear at the end of URLs.
+ * E.g. the ?query on PHP
+ *
+ * @param url The URL to filter/strip
+ * @param parameter The parameter to strip
+ *
+ * @return The stripped URL
+ */
public static String stripURLParameter(String url, String parameter) {
int paramIndex = url.indexOf("?" + parameter);
boolean wasFirstParam = true;
@@ -255,6 +308,7 @@ public class Utils {
/**
* Removes the current working directory from a given filename
* @param file
+ * Path to the file
* @return
* 'file' without the leading current working directory
*/
@@ -338,9 +392,24 @@ public class Utils {
}
private static final int SHORTENED_PATH_LENGTH = 12;
+ /**
+ * Shortens the path to a file
+ * @param path
+ * String of the path to the file
+ * @return
+ * The simplified path to the file.
+ */
public static String shortenPath(String path) {
return shortenPath(new File(path));
}
+
+ /**
+ * Shortens the path to a file
+ * @param file
+ * File object that you want the shortened path of.
+ * @return
+ * The simplified path to the file.
+ */
public static String shortenPath(File file) {
String path = removeCWD(file);
if (path.length() < SHORTENED_PATH_LENGTH * 2) {
@@ -351,6 +420,13 @@ public class Utils {
+ path.substring(path.length() - SHORTENED_PATH_LENGTH);
}
+ /**
+ * Sanitizes a string so that a filesystem can handle it
+ * @param text
+ * The text to be sanitized.
+ * @return
+ * The sanitized text.
+ */
public static String filesystemSanitized(String text) {
text = text.replaceAll("[^a-zA-Z0-9.-]", "_");
return text;
@@ -400,6 +476,13 @@ public class Utils {
return original;
}
+ /**
+ * Converts an integer into a human readable string
+ * @param bytes
+ * Non-human readable integer.
+ * @return
+ * Human readable interpretation of a byte.
+ */
public static String bytesToHumanReadable(int bytes) {
float fbytes = (float) bytes;
String[] mags = new String[] {"", "K", "M", "G", "T"};
@@ -411,6 +494,10 @@ public class Utils {
return String.format("%.2f%siB", fbytes, mags[magIndex]);
}
+ /**
+ * Gets and returns a list of all the album rippers present in the "com.rarchives.ripme.ripper.rippers" package.
+ * @return List of all album rippers present.
+ */
public static List<String> getListOfAlbumRippers() throws Exception {
List<String> list = new ArrayList<>();
for (Constructor<?> ripper : AbstractRipper.getRipperConstructors("com.rarchives.ripme.ripper.rippers")) {
@@ -418,6 +505,11 @@ public class Utils {
}
return list;
}
+
+ /**
+ * Gets and returns a list of all video rippers present in the "com.rarchives.ripme.ripper.rippers.video" package
+ * @return List of all the video rippers.
+ */
public static List<String> getListOfVideoRippers() throws Exception {
List<String> list = new ArrayList<>();
for (Constructor<?> ripper : AbstractRipper.getRipperConstructors("com.rarchives.ripme.ripper.rippers.video")) {
@@ -426,6 +518,11 @@ public class Utils {
return list;
}
+ /**
+ * Plays a sound from a file.
+ * @param filename
+ * Path to the sound file
+ */
public static void playSound(String filename) {
URL resource = ClassLoader.getSystemClassLoader().getResource(filename);
try {
@@ -563,6 +660,9 @@ public class Utils {
cookieCache = new HashMap<String, HashMap<String, String>>();
}
+ /**
+ * Gets all the cookies from a certain host
+ */
public static Map<String, String> getCookies(String host) {
HashMap<String, String> domainCookies = cookieCache.get(host);
if (domainCookies == null) {
@@ -579,4 +679,25 @@ public class Utils {
}
return domainCookies;
}
+
+ /**
+ * Gets the ResourceBundle AKA language package.
+ * Used for choosing the language of the UI.
+ *
+ * @return Returns the default resource bundle using the language specified in the config file.
+ */
+ public static ResourceBundle getResourceBundle() {
+ if (!getConfigString("lang", "").equals("")) {
+ String[] langCode = getConfigString("lang", "").split("_");
+ logger.info("Setting locale to " + getConfigString("lang", ""));
+ return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control());
+ }
+ try {
+ ResourceBundle rb = ResourceBundle.getBundle("LabelsBundle", Locale.getDefault(), new UTF8Control());
+ return rb;
+ } catch (MissingResourceException e) {
+ ResourceBundle rb = ResourceBundle.getBundle("LabelsBundle", Locale.ROOT);
+ return rb;
+ }
+ }
}
diff --git a/src/main/resources/LabelsBundle.properties b/src/main/resources/LabelsBundle.properties
new file mode 100644
index 00000000..3a42ab0c
--- /dev/null
+++ b/src/main/resources/LabelsBundle.properties
@@ -0,0 +1,37 @@
+Log = Log
+History = History
+created = created
+modified = modified
+Queue = Queue
+Configuration = Configuration
+
+# Keys for the Configuration menu
+
+current.version = Current version
+check.for.updates = Check for updates
+auto.update = Auto-update?
+max.download.threads = Maximum download threads
+timeout.mill = Timeout (in milliseconds):
+retry.download.count = Retry download count
+overwrite.existing.files = Overwrite existing files?
+sound.when.rip.completes = Sound when rip completes
+preserve.order = Preserve order
+save.logs = Save logs
+notification.when.rip.starts = Notification when rip starts
+save.urls.only = Save URLs only
+save.album.titles = Save album titles
+autorip.from.clipboard = Autorip from Clipboard
+save.descriptions = Save descriptions
+prefer.mp4.over.gif = Prefer MP4 over GIF
+restore.window.position = Restore window position
+remember.url.history = Remember URL history
+loading.history.from = Loading history from
+
+# Misc UI keys
+
+loading.history.from.configuration = Loading history from configuration
+interrupted.while.waiting.to.rip.next.album = Interrupted while waiting to rip next album
+inactive = Inactive
+re-rip.checked = Re-rip Checked
+remove = Remove
+clear = Clear
\ No newline at end of file
diff --git a/src/main/resources/LabelsBundle_de_DE.properties b/src/main/resources/LabelsBundle_de_DE.properties
new file mode 100644
index 00000000..da1fc8e2
--- /dev/null
+++ b/src/main/resources/LabelsBundle_de_DE.properties
@@ -0,0 +1,38 @@
+Log = Log
+History = Verlauf
+created = erstellt
+modified = geändert
+Queue = Queue
+Configuration = Konfiguration
+
+# Keys for the Configuration menu
+
+current.version = Aktuelle Version
+check.for.updates = Suche nach Aktualisierungen
+auto.update = Automatisch Aktualisieren?
+max.download.threads = Maximum download threads
+timeout.mill = Timeout (in milliseconds):
+retry.download.count = Anzahl der Downloadversuche
+overwrite.existing.files = Überschreibe bereits existierende Dateien?
+sound.when.rip.completes = Ton abspielen bei fertigem Download
+preserve.order = Reihenfolge beibehalten
+save.logs = Speichere Logs
+notification.when.rip.starts = Benachrichtigung wenn Download startet
+save.urls.only = Speicher nur URLs
+save.album.titles = Speichere Albumtitels
+autorip.from.clipboard = Automatisch Downloaden von der Zwischenablage
+save.descriptions = Speichere Beschreibungen
+prefer.mp4.over.gif = Bevorzuge MP4 über GIF
+restore.window.position = Wieder herstellen der Fensterposition
+remember.url.history = Erinnere URL Verlauf
+loading.history.from = Lade Verlauf von
+
+# Misc UI keys
+
+loading.history.from.configuration = Lade Verlauf aus Konfiguration
+interrupted.while.waiting.to.rip.next.album = Unterbrochen während Download des nächsten Albums
+inactive = Inaktiv
+re-rip.checked = Re-rip Überprüft
+remove = Entfernen
+clear = Leeren
+
diff --git a/src/main/resources/LabelsBundle_es_ES.properties b/src/main/resources/LabelsBundle_es_ES.properties
new file mode 100644
index 00000000..a1aa5a4a
--- /dev/null
+++ b/src/main/resources/LabelsBundle_es_ES.properties
@@ -0,0 +1,37 @@
+Log = Log
+History = Historia
+created = creado
+modified = modificado
+Queue = Cola
+Configuration = Configuracion
+
+# Keys for the Configuration menu
+
+current.version = Version Actual
+check.for.updates = Buscar actualizaciones
+auto.update = Auto-actualizar?
+max.download.threads = Maximos procesos de descarga
+timeout.mill = Timeout (in milliseconds):
+retry.download.count = Numero de reintentos de descarga
+overwrite.existing.files = Sobreescribir archivos existentes?
+sound.when.rip.completes = Sonar cuando el Rip termina
+preserve.order = Mantener orden
+save.logs = Guardar logs
+notification.when.rip.starts = Notificar cuando el Rip comienza
+save.urls.only = Guardar solamente URLs
+save.album.titles = Guardar titulos de albunes
+autorip.from.clipboard = Autorip desde Portapapeles
+save.descriptions = Guardar descripciones
+prefer.mp4.over.gif = Preferir MP4 sobre GIF
+restore.window.position = Restaurar posicion de ventana
+remember.url.history = Recordar historia URL
+loading.history.from = Cargando historia desde
+
+# Misc UI keys
+
+loading.history.from.configuration = Cargando historia desde la configuracion
+interrupted.while.waiting.to.rip.next.album = Interrumpido esperando el Rip del proximo album
+inactive = Inactivo
+re-rip.checked = Re-rip marcado
+remove = Quitar
+clear = Limpiar
\ No newline at end of file
diff --git a/src/main/resources/LabelsBundle_fr_CH.properties b/src/main/resources/LabelsBundle_fr_CH.properties
new file mode 100644
index 00000000..1e23645b
--- /dev/null
+++ b/src/main/resources/LabelsBundle_fr_CH.properties
@@ -0,0 +1,37 @@
+Log = Journal
+History = Historique
+created = créé le
+modified = modifié le
+Queue = File d'attente
+Configuration = Configuration
+
+# Keys for the Configuration menu
+
+current.version = Version actuelle
+check.for.updates = Vérifier mises à jour
+auto.update = Mises à jour automatiques?
+max.download.threads = Nombre de téléchargements parallèles maximum
+timeout.mill = Délai d'expiration (en millisecondes):
+retry.download.count = Nombre d'essais téléchargement
+overwrite.existing.files = Remplacer fichiers existants ?
+sound.when.rip.completes = Son lorsque le rip est terminé
+preserve.order = Conserver l'ordre
+save.logs = Enregistrer journaux
+notification.when.rip.starts = Notification lorsqu'un rip commence
+save.urls.only = Enregistrer URL uniquement
+save.album.titles = Enregistrer titres d'album
+autorip.from.clipboard = Autorip depuis presse-papier
+save.descriptions = Enregistrer descriptions
+prefer.mp4.over.gif = Préférer MP4 à GIF
+restore.window.position = Restaurer la position de la fenêtre
+remember.url.history = Se souvenir de l'historique des URL
+loading.history.from = Charger l'historique depuis
+
+# Misc UI keys
+
+loading.history.from.configuration = Charger l'historique depuis la configuration
+interrupted.while.waiting.to.rip.next.album = Interrompu lors de l'attente pour ripper le prochain album
+inactive = Inactif
+re-rip.checked = Re-rip vérifié
+remove = Enlever
+clear = Effacer
\ No newline at end of file
diff --git a/src/main/resources/LabelsBundle_pt_PT.properties b/src/main/resources/LabelsBundle_pt_PT.properties
new file mode 100644
index 00000000..61581728
--- /dev/null
+++ b/src/main/resources/LabelsBundle_pt_PT.properties
@@ -0,0 +1,37 @@
+Log = Registo
+History = Histórico
+created = criado
+modified = modificado
+Queue = Fila
+Configuration = Configuração
+
+# Keys for the Configuration menu
+
+current.version = Versão atual
+check.for.updates = Verificar atualizações
+auto.update = Atualização automática?
+max.download.threads = Número máximo de processos de transferência
+timeout.mill = Timeout (em milissegundos):
+retry.download.count = Número de novas tentativas de transferência
+overwrite.existing.files = Sobrescrever ficheiros existentes?
+sound.when.rip.completes = Notificar quando o rip é concluído
+preserve.order = Manter a ordem
+save.logs = Guardar registos
+notification.when.rip.starts = Notificar quando o rip começar
+save.urls.only = Apenas guardar URLs
+save.album.titles = Guardar os títulos de álbuns
+autorip.from.clipboard = Autorip da área de transferência
+save.descriptions = Guardar descrições
+prefer.mp4.over.gif = Preferir MP4 a GIF
+restore.window.position = Restaurar posição da janela
+remember.url.history = Lembrar histórico de URL
+loading.history.from = Carregar histórico de
+
+# Misc UI keys
+
+loading.history.from.configuration = A carregar o histórico da configuração
+interrupted.while.waiting.to.rip.next.album = Interrompido durante a espera do rip do próximo álbum
+inactive = Inativo
+re-rip.checked = Re-rip verificado
+remove = Remover
+clear = Limpar
diff --git a/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java
new file mode 100644
index 00000000..f1d8eff5
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java
@@ -0,0 +1,30 @@
+package com.rarchives.ripme.tst;
+
+import com.rarchives.ripme.ripper.AbstractRipper;
+import junit.framework.TestCase;
+
+import java.io.IOException;
+import java.net.URL;
+
+
+
+public class AbstractRipperTest extends TestCase {
+
+ public void testGetFileName() throws IOException {
+ String fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", "test");
+ assertEquals("test.test", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", null);
+ assertEquals("test", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), null, null);
+ assertEquals("Object", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file.png"), null, null);
+ assertEquals("file.png", fileName);
+
+ fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file."), null, null);
+ assertEquals("file.", fileName);
+ }
+
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/proxyTest.java b/src/test/java/com/rarchives/ripme/tst/proxyTest.java
new file mode 100644
index 00000000..36ea2f55
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/proxyTest.java
@@ -0,0 +1,52 @@
+package com.rarchives.ripme.tst;
+
+import java.io.IOException;
+import java.net.URL;
+import com.rarchives.ripme.utils.Proxy;
+import com.rarchives.ripme.utils.Utils;
+import junit.framework.TestCase;
+import com.rarchives.ripme.utils.Http;
+
+
+public class proxyTest extends TestCase {
+
+
+ // This test will only run on machines where the user has added an entry for proxy.socks
+ public void testSocksProxy() throws IOException {
+ // Unset proxy before testing
+ System.setProperty("http.proxyHost", "");
+ System.setProperty("https.proxyHost", "");
+ System.setProperty("socksProxyHost", "");
+ URL url = new URL("https://icanhazip.com");
+ String proxyConfig = Utils.getConfigString("proxy.socks", "");
+ if (!proxyConfig.equals("")) {
+ String ip1 = Http.url(url).ignoreContentType().get().text();
+ Proxy.setSocks(Utils.getConfigString("proxy.socks", ""));
+ String ip2 = Http.url(url).ignoreContentType().get().text();
+ assertFalse(ip1.equals(ip2));
+ } else {
+ System.out.println("Skipping testSocksProxy");
+ assert(true);
+ }
+ }
+
+ // This test will only run on machines where the user has added an entry for proxy.http
+ public void testHTTPProxy() throws IOException {
+ // Unset proxy before testing
+ System.setProperty("http.proxyHost", "");
+ System.setProperty("https.proxyHost", "");
+ System.setProperty("socksProxyHost", "");
+ URL url = new URL("https://icanhazip.com");
+ String proxyConfig = Utils.getConfigString("proxy.http", "");
+ if (!proxyConfig.equals("")) {
+ String ip1 = Http.url(url).ignoreContentType().get().text();
+ Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", ""));
+ String ip2 = Http.url(url).ignoreContentType().get().text();
+ assertFalse(ip1.equals(ip2));
+ } else {
+ System.out.println("Skipping testHTTPProxy");
+ assert(true);
+ }
+ }
+
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java
index fb991ec7..503db2c3 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java
@@ -3,7 +3,7 @@ package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URL;
-import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;;
+import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;
public class AerisdiesRipperTest extends RippersTest {
public void testAlbum() throws IOException {
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java
new file mode 100644
index 00000000..6bd8744a
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java
@@ -0,0 +1,25 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.BatoRipper;
+
+public class BatoRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ BatoRipper ripper = new BatoRipper(new URL("https://bato.to/chapter/1207152"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://bato.to/chapter/1207152");
+ BatoRipper ripper = new BatoRipper(url);
+ assertEquals("1207152", ripper.getGID(url));
+ }
+
+ public void testGetAlbumTitle() throws IOException {
+ URL url = new URL("https://bato.to/chapter/1207152");
+ BatoRipper ripper = new BatoRipper(url);
+ assertEquals("bato_1207152_I_Messed_Up_by_Teaching_at_a_Black_Gyaru_School!_Ch.2", ripper.getAlbumTitle(url));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java
new file mode 100644
index 00000000..a3c7d862
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import com.rarchives.ripme.ripper.rippers.BlackbrickroadofozRipper;
+
+import java.io.IOException;
+import java.net.URL;
+
+public class BlackbrickroadofozRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ BlackbrickroadofozRipper ripper = new BlackbrickroadofozRipper(new URL("http://www.blackbrickroadofoz.com/comic/beginning"));
+ testRipper(ripper);
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java
index a3fdfd81..a3e6a9c8 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java
@@ -16,4 +16,10 @@ public class DeviantartRipperTest extends RippersTest {
DeviantartRipper ripper = new DeviantartRipper(new URL("http://faterkcx.deviantart.com/gallery/"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://airgee.deviantart.com/gallery/");
+ DeviantartRipper ripper = new DeviantartRipper(url);
+ assertEquals("airgee", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java
new file mode 100644
index 00000000..8eb8d88f
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java
@@ -0,0 +1,18 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.DynastyscansRipper;
+
+public class DynastyscansRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
+ assertEquals("under_one_roof_ch01", ripper.getGID(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java
index 4a6c3539..e29c9ece 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java
@@ -17,4 +17,9 @@ public class EightmusesRipperTest extends RippersTest {
ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor"));
testRipper(ripper);
}
+
+ public void testGID() throws IOException {
+ EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"));
+ assertEquals("Affect3D-Comics", ripper.getGID(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java
index 4285993b..90d66ecd 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java
@@ -10,4 +10,10 @@ public class FuraffinityRipperTest extends RippersTest {
FuraffinityRipper ripper = new FuraffinityRipper(new URL("https://www.furaffinity.net/gallery/mustardgas/"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://www.furaffinity.net/gallery/mustardgas/");
+ FuraffinityRipper ripper = new FuraffinityRipper(url);
+ assertEquals("mustardgas", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java
new file mode 100644
index 00000000..e53c78e6
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java
@@ -0,0 +1,19 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.GfycatporntubeRipper;
+
+public class GfycatporntubeRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/");
+ GfycatporntubeRipper ripper = new GfycatporntubeRipper(url);
+ assertEquals("blowjob-bunny-puts-on-a-show", ripper.getGID(url));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java
index 144606fc..f142635d 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java
@@ -7,7 +7,7 @@ import com.rarchives.ripme.ripper.rippers.Hentai2readRipper;
public class Hentai2readRipperTest extends RippersTest {
public void testHentai2readAlbum() throws IOException {
- Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/"));
+ Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/1/"));
testRipper(ripper);
}
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java
new file mode 100644
index 00000000..13c2798d
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java
@@ -0,0 +1,14 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.HitomiRipper;
+
+public class HitomiRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ HitomiRipper ripper = new HitomiRipper(new URL("https://hitomi.la/galleries/975973.html"));
+ testRipper(ripper);
+ assertTrue(ripper.getGID(new URL("https://hitomi.la/galleries/975973.html")).equals("975973"));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java
new file mode 100644
index 00000000..1d9ef4ad
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java
@@ -0,0 +1,25 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.HypnohubRipper;
+
+public class HypnohubRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
+ URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
+ HypnohubRipper ripper = new HypnohubRipper(poolURL);
+ testRipper(ripper);
+ ripper = new HypnohubRipper(postURL);
+ testRipper(ripper);
+ }
+ public void testGetGID() throws IOException {
+ URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
+ HypnohubRipper ripper = new HypnohubRipper(poolURL);
+ assertEquals("2303", ripper.getGID(poolURL));
+
+ URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
+ assertEquals("63464_black_hair-bracelet-collar-corruption-female_only-", ripper.getGID(postURL));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java
index 135a7b0a..90d76442 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java
@@ -10,4 +10,10 @@ public class ImagevenueRipperTest extends RippersTest {
ImagevenueRipper ripper = new ImagevenueRipper(new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo");
+ ImagevenueRipper ripper = new ImagevenueRipper(url);
+ assertEquals("gallery_1373818527696_191lo", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java
index db8e1680..27ebdca2 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java
@@ -10,4 +10,10 @@ public class ImgboxRipperTest extends RippersTest {
ImgboxRipper ripper = new ImgboxRipper(new URL("https://imgbox.com/g/FJPF7t26FD"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://imgbox.com/g/FJPF7t26FD");
+ ImgboxRipper ripper = new ImgboxRipper(url);
+ assertEquals("FJPF7t26FD", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java
index 46f5679f..c321a99e 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java
@@ -1,13 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;
+import com.rarchives.ripme.ripper.rippers.ImgurRipper;
+import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
+
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
-
public class ImgurRipperTest extends RippersTest {
public void testImgurURLFailures() throws IOException {
@@ -17,7 +17,6 @@ public class ImgurRipperTest extends RippersTest {
failURLs.add(new URL("http://imgur.com/"));
failURLs.add(new URL("http://i.imgur.com"));
failURLs.add(new URL("http://i.imgur.com/"));
- failURLs.add(new URL("http://imgur.com/image"));
failURLs.add(new URL("http://imgur.com/image.jpg"));
failURLs.add(new URL("http://i.imgur.com/image.jpg"));
for (URL url : failURLs) {
@@ -50,6 +49,15 @@ public class ImgurRipperTest extends RippersTest {
}
}
+ public void testImgurSingleImage() throws IOException {
+ List contentURLs = new ArrayList<>();
+ contentURLs.add(new URL("http://imgur.com/qbfcLyG")); // Single image URL
+ contentURLs.add(new URL("https://imgur.com/KexUO")); // Single image URL
+ for (URL url : contentURLs) {
+ ImgurRipper ripper = new ImgurRipper(url);
+ testRipper(ripper);
+ }
+ }
public void testImgurAlbumWithMoreThan20Pictures() throws IOException {
ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/HUMsq"));
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java
new file mode 100644
index 00000000..ca355a2c
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.ManganeloRipper;
+
+public class ManganeloRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ ManganeloRipper ripper = new ManganeloRipper(new URL("http://manganelo.com/manga/black_clover"));
+ testRipper(ripper);
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java
index 621d77c3..d2a9581b 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java
@@ -10,4 +10,9 @@ public class ModelmayhemRipperTest extends RippersTest {
ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
+ assertEquals("520206", ripper.getGID(new URL("https://www.modelmayhem.com/portfolio/520206/viewall")));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java
new file mode 100644
index 00000000..2a0358d2
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.ModelxRipper;
+
+public class ModelxRipperTest extends RippersTest {
+ public void testModelxAlbum() throws IOException {
+ ModelxRipper ripper = new ModelxRipper(new URL("http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/"));
+ testRipper(ripper);
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java
index 6714195d..b4d01032 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java
@@ -10,4 +10,15 @@ public class MyhentaicomicsRipperTest extends RippersTest {
MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales");
+ MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url);
+ // Test a comic
+ assertEquals("Nienna-Lost-Tales", ripper.getGID(url));
+ // Test a search
+ assertEquals("test", ripper.getGID(new URL("http://myhentaicomics.com/index.php/search?q=test")));
+ // Test a tag
+ assertEquals("2409", ripper.getGID(new URL("http://myhentaicomics.com/index.php/tag/2409/")));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java
new file mode 100644
index 00000000..108feed2
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java
@@ -0,0 +1,33 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.NhentaiRipper;
+
+public class NhentaiRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/"));
+ assertEquals("233295", ripper.getGID(new URL("https://nhentai.net/g/233295/")));
+ }
+
+ // Test the tag black listing
+ public void testTagBlackList() throws IOException {
+ URL url = new URL("https://nhentai.net/g/233295/");
+ NhentaiRipper ripper = new NhentaiRipper(url);
+ // Test multiple blacklisted tags
+ String[] tags = {"test", "one", "blowjob"};
+ String blacklistedTag = ripper.checkTags(ripper.getFirstPage(), tags);
+ assertEquals("blowjob", blacklistedTag);
+
+ // test tags with spaces in them
+ String[] tags2 = {"test", "one", "sole female"};
+ blacklistedTag = ripper.checkTags(ripper.getFirstPage(), tags2);
+ assertEquals("sole female", blacklistedTag);
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java
new file mode 100644
index 00000000..3353eeb5
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java
@@ -0,0 +1,18 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.NudeGalsRipper;
+
+public class NudeGalsRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
+ assertEquals("5541", ripper.getGID(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java
index 96685cbc..74bee8d9 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java
@@ -10,4 +10,10 @@ public class PornhubRipperTest extends RippersTest {
PornhubRipper ripper = new PornhubRipper(new URL("https://www.pornhub.com/album/15680522"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://www.pornhub.com/album/15680522");
+ PornhubRipper ripper = new PornhubRipper(url);
+ assertEquals("15680522", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java
new file mode 100644
index 00000000..4c63d66e
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java
@@ -0,0 +1,20 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.Rule34Ripper;
+
+public class Rule34RipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ Rule34Ripper ripper = new Rule34Ripper(new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo");
+ Rule34Ripper ripper = new Rule34Ripper(url);
+ assertEquals("bimbo", ripper.getGID(url));
+ }
+
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java
index 6bbc8890..5b57e291 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java
@@ -17,4 +17,15 @@ public class SankakuComplexRipperTest extends RippersTest {
testRipper(ripper);
}
*/
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29");
+ SankakuComplexRipper ripper = new SankakuComplexRipper(url);
+ assertEquals("idol._meme_(me!me!me!)_(cosplay)", ripper.getGID(url));
+ }
+
+ public void testGetSubDomain() throws IOException {
+ URL url = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29");
+ SankakuComplexRipper ripper = new SankakuComplexRipper(url);
+ assertEquals("idol.", ripper.getSubDomain(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java
index 3efa31b4..65f371d1 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java
@@ -13,4 +13,10 @@ public class ShesFreakyRipperTest extends RippersTest {
testRipper(ripper);
}
*/
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html");
+ ShesFreakyRipper ripper = new ShesFreakyRipper(url);
+ assertEquals("nicee-snow-bunny-579NbPjUcYa", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java
index c46e922c..c4f56432 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java
@@ -10,4 +10,10 @@ public class SinfestRipperTest extends RippersTest {
SinfestRipper ripper = new SinfestRipper(new URL("http://sinfest.net/view.php?date=2000-01-17"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://sinfest.net/view.php?date=2000-01-17");
+ SinfestRipper ripper = new SinfestRipper(url);
+ assertEquals("2000-01-17", ripper.getGID(url));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java
index d1ce6b33..3866b6ba 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java
@@ -10,4 +10,10 @@ public class SinnercomicsRipperTest extends RippersTest {
SinnercomicsRipper ripper = new SinnercomicsRipper(new URL("https://sinnercomics.com/comic/beyond-the-hotel-page-01/"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://sinnercomics.com/comic/beyond-the-hotel-page-01/");
+ SinnercomicsRipper ripper = new SinnercomicsRipper(url);
+ assertEquals("beyond-the-hotel", ripper.getGID(url));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java
new file mode 100644
index 00000000..c7aa694e
--- /dev/null
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java
@@ -0,0 +1,19 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.SmuttyRipper;
+
+public class SmuttyRipperTest extends RippersTest {
+ public void testRip() throws IOException {
+ SmuttyRipper ripper = new SmuttyRipper(new URL("https://smutty.com/user/QUIGON/"));
+ testRipper(ripper);
+ }
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://smutty.com/user/QUIGON/");
+ SmuttyRipper ripper = new SmuttyRipper(url);
+ assertEquals("QUIGON", ripper.getGID(url));
+ }
+}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java
index 128f3f17..c9aded3e 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java
@@ -10,4 +10,10 @@ public class StaRipperTest extends RippersTest {
StaRipper ripper = new StaRipper(new URL("https://sta.sh/2hn9rtavr1g"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://sta.sh/2hn9rtavr1g");
+ StaRipper ripper = new StaRipper(url);
+ assertEquals("2hn9rtavr1g", ripper.getGID(url));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java
index 9617e4b8..f9e448e6 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java
@@ -10,4 +10,10 @@ public class TapasticRipperTest extends RippersTest {
TapasticRipper ripper = new TapasticRipper(new URL("https://tapas.io/series/tsiwbakd-comic"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://tapas.io/series/tsiwbakd-comic");
+ TapasticRipper ripper = new TapasticRipper(url);
+ assertEquals("series_ tsiwbakd-comic", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java
index aa43103b..a402ebc3 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java
@@ -10,4 +10,10 @@ public class TeenplanetRipperTest extends RippersTest {
TeenplanetRipper ripper = new TeenplanetRipper(new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html");
+ TeenplanetRipper ripper = new TeenplanetRipper(url);
+ assertEquals("the-perfect-side-of-me-6588", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java
index 3c7a8fbd..91fc0617 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java
@@ -10,4 +10,10 @@ public class TheyiffgalleryRipperTest extends RippersTest {
TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(new URL("https://theyiffgallery.com/index?/category/4303"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://theyiffgallery.com/index?/category/4303");
+ TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(url);
+ assertEquals("4303", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java
index ccbeb8bb..9659d630 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java
@@ -10,5 +10,11 @@ public class VidbleRipperTest extends RippersTest {
VidbleRipper ripper = new VidbleRipper(new URL("http://www.vidble.com/album/y1oyh3zd"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://www.vidble.com/album/y1oyh3zd");
+ VidbleRipper ripper = new VidbleRipper(url);
+ assertEquals("y1oyh3zd", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java
index ae31192b..6a7df184 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java
@@ -38,14 +38,17 @@ public class VideoRippersTest extends RippersTest {
}
}
- public void testTwitchVideoRipper() throws IOException {
- List contentURLs = new ArrayList<>();
- contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull"));
- for (URL url : contentURLs) {
- TwitchVideoRipper ripper = new TwitchVideoRipper(url);
- videoTestHelper(ripper);
- }
- }
+
+// Test disabled. See https://github.com/RipMeApp/ripme/issues/574
+
+// public void testTwitchVideoRipper() throws IOException {
+// List contentURLs = new ArrayList<>();
+// contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull"));
+// for (URL url : contentURLs) {
+// TwitchVideoRipper ripper = new TwitchVideoRipper(url);
+// videoTestHelper(ripper);
+// }
+// }
public void testXhamsterRipper() throws IOException {
List contentURLs = new ArrayList<>();
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineRipperTest.java
deleted file mode 100644
index 343a72b0..00000000
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineRipperTest.java
+++ /dev/null
@@ -1,16 +0,0 @@
-package com.rarchives.ripme.tst.ripper.rippers;
-
-import java.io.IOException;
-import java.net.URL;
-
-import com.rarchives.ripme.ripper.rippers.VineRipper;
-
-public class VineRipperTest extends RippersTest {
- // https://github.com/RipMeApp/ripme/issues/181
- /*
- public void testVineRip() throws IOException {
- VineRipper ripper = new VineRipper(new URL("https://vine.co/u/954440445776334848"));
- testRipper(ripper);
- }
- */
-}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java
index 4aa50d0a..16407ad7 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java
@@ -15,4 +15,10 @@ public class WebtoonsRipperTest extends RippersTest {
WebtoonsRipper ripper = new WebtoonsRipper(new URL("http://www.webtoons.com/en/drama/lookism/ep-145/viewer?title_no=1049&episode_no=145"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://www.webtoons.com/en/drama/my-boo/ep-33/viewer?title_no=1185&episode_no=33");
+ WebtoonsRipper ripper = new WebtoonsRipper(url);
+ assertEquals("my-boo", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java
index 2f7dbcf9..8879c561 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java
@@ -55,6 +55,7 @@ public class WordpressComicRipperTest extends RippersTest {
WordpressComicRipper ripper = new WordpressComicRipper(
new URL("http://www.konradokonski.com/sawdust/comic/get-up/"));
testRipper(ripper);
+
}
public void test_konradokonski_2() throws IOException {
@@ -63,6 +64,13 @@ public class WordpressComicRipperTest extends RippersTest {
testRipper(ripper);
}
+ public void test_konradokonski_getAlbumTitle() throws IOException {
+ URL url = new URL("http://www.konradokonski.com/sawdust/comic/get-up/");
+ WordpressComicRipper ripper = new WordpressComicRipper(url);
+ assertEquals("konradokonski.com_sawdust", ripper.getAlbumTitle(url));
+
+ }
+
/*
// https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
public void test_freeadultcomix() throws IOException {
@@ -83,6 +91,32 @@ public class WordpressComicRipperTest extends RippersTest {
new URL("http://tnbtu.com/comic/01-00/"));
testRipper(ripper);
}
+
+ public void test_Eightmuses_download() throws IOException {
+ WordpressComicRipper ripper = new WordpressComicRipper(
+ new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/"));
+ testRipper(ripper);
+ }
+
+ public void test_Eightmuses_getAlbumTitle() throws IOException {
+ URL url = new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/");
+ WordpressComicRipper ripper = new WordpressComicRipper(url);
+ assertEquals("8muses.download_lustomic-playkittens-josh-samuel-porn-comics-8-muses",
+ ripper.getAlbumTitle(url));
+ }
+
+ public void test_spyingwithlana_download() throws IOException {
+ WordpressComicRipper ripper = new WordpressComicRipper(
+ new URL("http://spyingwithlana.com/comic/the-big-hookup/"));
+ testRipper(ripper);
+ }
+
+ public void test_spyingwithlana_getAlbumTitle() throws IOException {
+ URL url = new URL("http://spyingwithlana.com/comic/the-big-hookup/");
+ WordpressComicRipper ripper = new WordpressComicRipper(url);
+ assertEquals("spyingwithlana_the-big-hookup", ripper.getAlbumTitle(url));
+ }
+
// https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
// public void test_pepsaga() throws IOException {
// WordpressComicRipper ripper = new WordpressComicRipper(
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XbooruRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XbooruRipperTest.java
index 8eefd4e3..6dfc0bab 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XbooruRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XbooruRipperTest.java
@@ -10,4 +10,10 @@ public class XbooruRipperTest extends RippersTest {
XbooruRipper ripper = new XbooruRipper(new URL("http://xbooru.com/index.php?page=post&s=list&tags=furry"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://xbooru.com/index.php?page=post&s=list&tags=furry");
+ XbooruRipper ripper = new XbooruRipper(url);
+ assertEquals("furry", ripper.getGID(url));
+ }
}
\ No newline at end of file
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java
index f66b27d1..54b22eb0 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java
@@ -16,4 +16,10 @@ public class XhamsterRipperTest extends RippersTest {
XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664");
+ XhamsterRipper ripper = new XhamsterRipper(url);
+ assertEquals("7254664", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java
index a74b76e9..1a304468 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java
@@ -16,4 +16,10 @@ public class YuvutuRipperTest extends RippersTest {
YuvutuRipper ripper = new YuvutuRipper(new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333");
+ YuvutuRipper ripper = new YuvutuRipper(url);
+ assertEquals("420333", ripper.getGID(url));
+ }
}
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java
index de953be8..9facf481 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java
@@ -10,4 +10,16 @@ public class ZizkiRipperTest extends RippersTest {
ZizkiRipper ripper = new ZizkiRipper(new URL("http://zizki.com/dee-chorde/we-got-spirit"));
testRipper(ripper);
}
+
+ public void testGetGID() throws IOException {
+ URL url = new URL("http://zizki.com/dee-chorde/we-got-spirit");
+ ZizkiRipper ripper = new ZizkiRipper(url);
+ assertEquals("dee-chorde", ripper.getGID(url));
+ }
+
+ public void testAlbumTitle() throws IOException {
+ URL url = new URL("http://zizki.com/dee-chorde/we-got-spirit");
+ ZizkiRipper ripper = new ZizkiRipper(url);
+ assertEquals("zizki_Dee Chorde_We Got Spirit", ripper.getAlbumTitle(url));
+ }
}