commit 4f9b912bf3
README.md

@@ -30,8 +30,6 @@ For information about running the `.jar` file, see [the How To Run wiki](https:/
 
 ## [Changelog](https://github.com/ripmeapp/ripme/blob/master/ripme.json) (ripme.json)
 
-## [Website](http://rip.rarchives.com/)
-
 # Features
 
 * Quickly downloads all images in an online album (see supported sites below)
pom.xml

@@ -4,7 +4,7 @@
     <groupId>com.rarchives.ripme</groupId>
     <artifactId>ripme</artifactId>
     <packaging>jar</packaging>
-    <version>1.7.27</version>
+    <version>1.7.47</version>
     <name>ripme</name>
     <url>http://rip.rarchives.com</url>
     <properties>
ripme.json

@@ -1,6 +1,26 @@
 {
-  "latestVersion": "1.7.27",
+  "latestVersion": "1.7.47",
   "changeList": [
+    "1.7.47: Added quick queue support for hentai2read ripper; Fixed instagram ripper; SankakuComplexRipper can now download from different subdomains; Added ripper for bato.to; Added quick queue support for 8muses.download; ",
+    "1.7.46: Fixed hentai2read ripper; Rewrote the myhentaicomics ripper to use the new getAlbumsToQueue func; Can now blacklist nhentai tags; SinnercomicsRipper no longer adds -page-01 to folder names; EightmusesRipper now adds file extension to filename; disabled test for twitch ripper",
+    "1.7.45: Fixed hentai2read ripper; ImageBam album fixed; Added various translations; TsuminoRipper no longer requires album name to download",
+    "1.7.44: Fixed instagram ripper regex",
+    "1.7.43: Fixed queryId regex in instagram ripper",
+    "1.7.42: Added user support to SmuttyRipper; Removed vine ripper; Fixed NudeGalsRipper; addURLToDownload improvements; Fixed Instagram ripper",
+    "1.7.41: Added support for spyingwithlana.com; Added ManganeloRipper; Added support for dynasty-scans.com",
+    "1.7.40: Added hypnohub.net ripper; Fixed rule34.xxx ripper; Tsumino Ripper now adds .png to filenames",
+    "1.7.39: Added rule34.xxx ripper; Added Gfycatporntube.com ripper; Fixed AbstractRipper subdir bug; Added AbstractRipper unit tests",
+    "1.7.38: Added http and socks proxy support; Extended some unit tests to include getGid; Added HitomiRipper; hentaifoundry ripper now can rip all images from accounts",
+    "1.7.37: Minor code clean up; Added socks proxy support; Added support for 8muses.download; Hentaifoundry no longer errors when there are no more pages; Fix bug that causes tumblr to replace https with httpss when downloading resized images",
+    "1.7.36: Fixed Instagram ripper; Fixed hentai2read ripper test; Fixed tnbtu.com ripper",
+    "1.7.35: Fixed instagram ripper; hentai2read ripper now properly names folders",
+    "1.7.34: Added Blackbrickroadofoz Ripper; Fixed webtoons regex",
+    "1.7.33: Instagram ripper no longer errors out when downloading from more than 1 page",
+    "1.7.32: Instagram ripper update to use new endpoints",
+    "1.7.31: Instagram ripper no longer errors out when getting next page",
+    "1.7.30: Fixed usage of command-line on non-headless systems",
+    "1.7.29: Can now download single images from imgur; Improved handling of headless mode & OS-specific config; Added modelx ripper; Fixed eroshae ripper",
+    "1.7.28: IG ripper now uses display_url when downloading images; Reddit ripper now gets erome links; Hentaifoundry Ripper no longer errors out when there is no next page",
     "1.7.27: IG ripper can now rip from tags; fixed json parsing issues",
     "1.7.26: fixed instagram ripper",
     "1.7.25: Fixed instagram ripper; Added an option to use short names for 8muses; Added tsuminoRipper; Added support for incase.buttsmithy.com",
src/main/java/com/rarchives/ripme/App.java

@@ -1,5 +1,6 @@
 package com.rarchives.ripme;
 
+import java.awt.*;
 import java.io.File;
 import java.io.IOException;
 import java.io.BufferedReader;
@@ -18,6 +19,7 @@ import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.commons.lang.SystemUtils;
 import org.apache.log4j.Logger;
 
 import com.rarchives.ripme.ripper.AbstractRipper;
@@ -25,48 +27,64 @@ import com.rarchives.ripme.ui.History;
 import com.rarchives.ripme.ui.HistoryEntry;
 import com.rarchives.ripme.ui.MainWindow;
 import com.rarchives.ripme.ui.UpdateUtils;
+import com.rarchives.ripme.utils.Proxy;
 import com.rarchives.ripme.utils.RipUtils;
 import com.rarchives.ripme.utils.Utils;
 
 /**
  * Entry point to application.
+ * This is where all the fun happens, with the main method.
  * Decides to display UI or to run silently via command-line.
+ *
+ * As the "controller" to all other classes, it parses command line parameters and loads the history.
  */
 public class App {
 
-    public static final Logger logger;
+    public static final Logger logger = Logger.getLogger(App.class);
     private static final History HISTORY = new History();
 
-    static {
-        //initialize logger
-        Utils.configureLogger();
-        logger = Logger.getLogger(App.class);
-    }
-
+    /**
+     * Where everything starts. Takes in, and tries to parse as many commandline arguments as possible.
+     * Otherwise, it launches a GUI.
+     *
+     * @param args Array of command line arguments.
+     */
    public static void main(String[] args) throws MalformedURLException {
        CommandLine cl = getArgs(args);
 
        if (args.length > 0 && cl.hasOption('v')){
-           logger.error(UpdateUtils.getThisJarVersion());
+           logger.info(UpdateUtils.getThisJarVersion());
            System.exit(0);
        }
 
-       System.setProperty("apple.laf.useScreenMenuBar", "true");
-       System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
-       logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+       if (Utils.getConfigString("proxy.http", null) != null) {
+           Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", null));
+       } else if (Utils.getConfigString("proxy.socks", null) != null) {
+           Proxy.setSocks(Utils.getConfigString("proxy.socks", null));
+       }
 
-       if (args.length > 0) {
-           // CLI Mode
+       if (GraphicsEnvironment.isHeadless() || args.length > 0) {
            handleArguments(args);
        } else {
-           // GUI Mode
+           if (SystemUtils.IS_OS_MAC_OSX) {
+               System.setProperty("apple.laf.useScreenMenuBar", "true");
+               System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
+           }
+
+           Utils.configureLogger();
+
+           logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+
            MainWindow mw = new MainWindow();
            SwingUtilities.invokeLater(mw);
        }
    }
 
    /**
     * Creates an abstract ripper and instructs it to rip.
     * @param url URL to be ripped
-    * @throws Exception
+    * @throws Exception Nothing too specific here, just a catch-all.
+    *
     */
    private static void rip(URL url) throws Exception {
        AbstractRipper ripper = AbstractRipper.getRipper(url);
@@ -80,20 +98,45 @@ public class App {
     */
    private static void handleArguments(String[] args) {
        CommandLine cl = getArgs(args);
-       if (cl.hasOption('h')) {
+
+       //Help (list commands)
+       if (cl.hasOption('h') || args.length == 0) {
            HelpFormatter hf = new HelpFormatter();
            hf.printHelp("java -jar ripme.jar [OPTIONS]", getOptions());
            System.exit(0);
        }
 
+       Utils.configureLogger();
+       logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+
+       //Allow file overwriting
        if (cl.hasOption('w')) {
            Utils.setConfigBoolean("file.overwrite", true);
        }
 
+       //SOCKS proxy server
+       if (cl.hasOption('s')) {
+           String sservfull = cl.getOptionValue('s').trim();
+           Proxy.setSocks(sservfull);
+       }
+
+       //HTTP proxy server
+       if (cl.hasOption('p')) {
+           String proxyserverfull = cl.getOptionValue('p').trim();
+           Proxy.setHTTPProxy(proxyserverfull);
+       }
+
+       //Number of threads
        if (cl.hasOption('t')) {
            Utils.setConfigInteger("threads.size", Integer.parseInt(cl.getOptionValue('t')));
        }
 
+       //Ignore 404
        if (cl.hasOption('4')) {
            Utils.setConfigBoolean("errors.skip404", true);
        }
 
+       //Re-rip <i>all</i> previous albums
        if (cl.hasOption('r')) {
            // Re-rip all via command-line
            List<String> history = Utils.getConfigList("download.history");
@@ -115,6 +158,8 @@ public class App {
            // Exit
            System.exit(0);
        }
 
+       //Re-rip all <i>selected</i> albums
        if (cl.hasOption('R')) {
            loadHistory();
            if (HISTORY.toList().isEmpty()) {
@@ -146,20 +191,30 @@ public class App {
                System.exit(-1);
            }
        }
 
+       //Save the order of images in album
        if (cl.hasOption('d')) {
            Utils.setConfigBoolean("download.save_order", true);
        }
 
+       //Don't save the order of images in album
        if (cl.hasOption('D')) {
            Utils.setConfigBoolean("download.save_order", false);
        }
 
+       //If both are specified, exit since the options conflict.
        if ((cl.hasOption('d'))&&(cl.hasOption('D'))) {
            logger.error("\nCannot specify '-d' and '-D' simultaneously");
            System.exit(-1);
        }
 
+       //Destination directory
        if (cl.hasOption('l')) {
            // change the default rips directory
            Utils.setConfigString("rips.directory", cl.getOptionValue('l'));
        }
 
+       //Read URLs from File
        if (cl.hasOption('f')) {
            String filename = cl.getOptionValue('f');
            try {
@@ -175,10 +230,13 @@ public class App {
                logger.error("[!] Failed reading file containing list of URLs. Cannot continue.");
            }
        }
 
+       //The URL to rip.
        if (cl.hasOption('u')) {
            String url = cl.getOptionValue('u').trim();
            ripURL(url, cl.hasOption("n"));
        }
+
    }
 
    /**
@@ -226,6 +284,8 @@ public class App {
        opts.addOption("n", "no-prop-file", false, "Do not create properties file.");
        opts.addOption("f", "urls-file", true, "Rip URLs from a file.");
        opts.addOption("v", "version", false, "Show current version");
+       opts.addOption("s", "socks-server", true, "Use socks server ([user:password]@host[:port])");
+       opts.addOption("p", "proxy-server", true, "Use HTTP Proxy server ([user:password]@host[:port])");
        return opts;
    }
 
@@ -244,7 +304,7 @@ public class App {
            return null;
        }
    }
 
    /**
     * Loads history from history file into memory.
     */
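The 's' and 'p' options wired up in getOptions() above expose the new proxy support on the command line; the help banner in handleArguments() shows the expected invocation. A hedged usage sketch, where host, port, and credentials are invented placeholders:

```
# SOCKS proxy (feeds Proxy.setSocks via the 's' option)
java -jar ripme.jar -s user:password@127.0.0.1:1080 -u <album url>

# HTTP proxy (feeds Proxy.setHTTPProxy via the 'p' option)
java -jar ripme.jar -p user:password@127.0.0.1:8080 -u <album url>
```

The same servers can be set persistently through the proxy.http / proxy.socks config keys that main() checks on startup.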
src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java

@@ -11,6 +11,7 @@ import org.jsoup.nodes.Document;
 
 import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Utils;
+import com.rarchives.ripme.ui.MainWindow;
 
 /**
  * Simplified ripper, designed for ripping from sites by parsing HTML.
@@ -53,12 +54,29 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
    protected boolean hasDescriptionSupport() {
        return false;
    }
 
    protected String[] getDescription(String url, Document page) throws IOException {
        throw new IOException("getDescription not implemented"); // Do I do this or make an abstract function?
    }
    protected int descSleepTime() {
        return 100;
    }
+
+    protected List<String> getAlbumsToQueue(Document doc) {
+        return null;
+    }
+
+    // If a page has Queue support then it has no images we want to download, just a list of urls we want to add to
+    // the queue
+    protected boolean hasQueueSupport() {
+        return false;
+    }
+
+    // Takes a url and checks if it is for a page of albums
+    protected boolean pageContainsAlbums(URL url) {
+        return false;
+    }
+
    @Override
    public void rip() throws IOException {
        int index = 0;
@@ -67,6 +85,16 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
        sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
        Document doc = getFirstPage();
 
+       if (hasQueueSupport() && pageContainsAlbums(this.url)) {
+           List<String> urls = getAlbumsToQueue(doc);
+           for (String url : urls) {
+               MainWindow.addUrlToQueue(url);
+           }
+
+           // We set doc to null here so the while loop below this doesn't fire
+           doc = null;
+       }
+
        while (doc != null) {
            if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
                sendUpdate(STATUS.DOWNLOAD_COMPLETE, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
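Taken together, hasQueueSupport(), pageContainsAlbums(), and getAlbumsToQueue() let a ripper treat an album-index page as a source of queue entries instead of images; rip() then hands each URL to MainWindow.addUrlToQueue() and skips the download loop. A minimal sketch of a hypothetical subclass (the site convention, selector, and class name are invented; the other methods a real ripper must implement are elided):

```java
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;

// Hypothetical ripper using the new quick-queue hooks; BatoRipper, added
// later in this commit, is the real-world version of this pattern.
public class ExampleQueueRipper extends AbstractHTMLRipper {
    public ExampleQueueRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    protected boolean hasQueueSupport() {
        return true; // this ripper understands album-index pages
    }

    @Override
    protected boolean pageContainsAlbums(URL url) {
        // invented convention: /series/ pages list chapters, not images
        return url.toExternalForm().contains("/series/");
    }

    @Override
    protected List<String> getAlbumsToQueue(Document doc) {
        List<String> urls = new ArrayList<>();
        for (Element link : doc.select("a.chapter-link")) { // invented selector
            urls.add(link.attr("abs:href"));
        }
        return urls;
    }

    // getHost(), getDomain(), getGID(), getFirstPage(), getURLsFromPage()
    // and downloadURL() omitted for brevity.
}
```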
src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java

@@ -192,7 +192,8 @@ public abstract class AbstractRipper
     *      True if downloaded successfully
     *      False if failed to download
     */
-    protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String, String> cookies);
+    protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String, String> cookies,
+                                                Boolean getFileExtFromMIME);
 
    /**
     * Queues image to be downloaded and saved.
@@ -212,7 +213,7 @@ public abstract class AbstractRipper
     *      True if downloaded successfully
     *      False if failed to download
     */
-    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName) {
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName, String extension, Boolean getFileExtFromMIME) {
        // Don't re-add the url if it was downloaded in a previous rip
        if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
            if (hasDownloadedURL(url.toExternalForm())) {
@@ -228,21 +229,7 @@ public abstract class AbstractRipper
            return false;
        }
        logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
-       String saveAs;
-       if (fileName != null) {
-           saveAs = fileName;
-           // Get the extension of the file
-           String extension = url.toExternalForm().substring(url.toExternalForm().lastIndexOf(".") + 1);
-           saveAs = saveAs + "." + extension;
-       } else {
-           saveAs = url.toExternalForm();
-           saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
-       }
-
-       if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
-       if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
-       if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
-       if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
+       String saveAs = getFileName(url, fileName, extension);
        File saveFileAs;
        try {
            if (!subdirectory.equals("")) {
@@ -271,7 +258,15 @@ public abstract class AbstractRipper
                logger.debug("Unable to write URL history file");
            }
        }
-       return addURLToDownload(url, saveFileAs, referrer, cookies);
+       return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME);
+    }
+
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies, String fileName, String extension) {
+        return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, extension, false);
+    }
+
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName) {
+        return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, null);
    }
 
    /**
@@ -306,6 +301,35 @@ public abstract class AbstractRipper
        return addURLToDownload(url, prefix, "");
    }
 
+    public static String getFileName(URL url, String fileName, String extension) {
+        String saveAs;
+        if (fileName != null) {
+            saveAs = fileName;
+        } else {
+            saveAs = url.toExternalForm();
+            saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+        }
+        if (extension == null) {
+            // Get the extension of the file
+            String[] lastBitOfURL = url.toExternalForm().split("/");
+
+            String[] lastBit = lastBitOfURL[lastBitOfURL.length - 1].split(".");
+            if (lastBit.length != 0) {
+                extension = lastBit[lastBit.length - 1];
+                saveAs = saveAs + "." + extension;
+            }
+        }
+
+        if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
+        if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
+        if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
+        if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
+        if (extension != null) {
+            saveAs = saveAs + "." + extension;
+        }
+        return saveAs;
+    }
+
+
    /**
     * Waits for downloading threads to complete.
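The net effect of this refactor: filename resolution moves into the static getFileName() helper, and the long-form addURLToDownload() gains an explicit extension plus a getFileExtFromMIME flag, with two delegating overloads keeping older call sites compiling. A hedged call-site sketch (prefix, referrer, cookies, and file name are placeholders):

```java
// Old-style call: the extension is inferred from the URL by getFileName()
addURLToDownload(imageUrl, getPrefix(index), "", referrer, cookies, "page_01");

// New-style call: the URL carries no usable extension, so ask the download
// thread to sniff one from the response's MIME type instead
addURLToDownload(imageUrl, getPrefix(index), "", referrer, cookies, "page_01", null, true);
```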
src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java

@@ -50,7 +50,7 @@ public abstract class AlbumRipper extends AbstractRipper {
    /**
     * Queues multiple URLs of single images to download from a single Album URL
     */
-    public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
+    public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
        // Only download one file if this is a test.
        if (super.isThisATest() &&
                (itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) {
@@ -82,7 +82,7 @@ public abstract class AlbumRipper extends AbstractRipper {
        }
        else {
            itemsPending.put(url, saveAs);
-           DownloadFileThread dft = new DownloadFileThread(url, saveAs, this);
+           DownloadFileThread dft = new DownloadFileThread(url, saveAs, this, getFileExtFromMIME);
            if (referrer != null) {
                dft.setReferrer(referrer);
            }
@@ -96,7 +96,7 @@ public abstract class AlbumRipper extends AbstractRipper {
 
    @Override
    public boolean addURLToDownload(URL url, File saveAs) {
-       return addURLToDownload(url, saveAs, null, null);
+       return addURLToDownload(url, saveAs, null, null, false);
    }
 
    /**
src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java

@@ -8,6 +8,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.net.URLConnection;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -36,10 +37,11 @@ class DownloadFileThread extends Thread {
    private String prettySaveAs;
    private AbstractRipper observer;
    private int retries;
+    private Boolean getFileExtFromMIME;
 
    private final int TIMEOUT;
 
-    public DownloadFileThread(URL url, File saveAs, AbstractRipper observer) {
+    public DownloadFileThread(URL url, File saveAs, AbstractRipper observer, Boolean getFileExtFromMIME) {
        super();
        this.url = url;
        this.saveAs = saveAs;
@@ -47,6 +49,7 @@ class DownloadFileThread extends Thread {
        this.observer = observer;
        this.retries = Utils.getConfigInteger("download.retries", 1);
        this.TIMEOUT = Utils.getConfigInteger("download.timeout", 60000);
+        this.getFileExtFromMIME = getFileExtFromMIME;
    }
 
    public void setReferrer(String referrer) {
@@ -143,9 +146,15 @@ class DownloadFileThread extends Thread {
                observer.downloadErrored(url, "Imgur image is 404: " + url.toExternalForm());
                return;
            }
 
            // Save file
            bis = new BufferedInputStream(huc.getInputStream());
+
+            // Check if we should get the file ext from the MIME type
+            if (getFileExtFromMIME) {
+                String fileExt = URLConnection.guessContentTypeFromStream(bis).replaceAll("image/", "");
+                saveAs = new File(saveAs.toString() + "." + fileExt);
+            }
+
            fos = new FileOutputStream(saveAs);
            IOUtils.copy(bis, fos);
            break; // Download successful: break out of infinite loop
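A note on the MIME sniff above: URLConnection.guessContentTypeFromStream() peeks at the stream's leading bytes and therefore requires a stream that supports mark/reset, which the BufferedInputStream wrapper provides; it can also return null for content it does not recognize. A standalone sketch of the same idea (the file path is a placeholder):

```java
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URLConnection;

class MimeSniffSketch {
    public static void main(String[] args) throws IOException {
        try (BufferedInputStream in = new BufferedInputStream(new FileInputStream("downloaded.bin"))) {
            // Reads the magic bytes without consuming them (uses mark/reset)
            String contentType = URLConnection.guessContentTypeFromStream(in); // e.g. "image/png"
            if (contentType != null) {
                System.out.println(contentType.replaceAll("image/", ""));     // e.g. "png"
            }
        }
    }
}
```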
src/main/java/com/rarchives/ripme/ripper/VideoRipper.java

@@ -10,6 +10,7 @@ import java.util.Map;
 import com.rarchives.ripme.ui.RipStatusMessage;
 import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Utils;
+import com.sun.org.apache.xpath.internal.operations.Bool;
 
 public abstract class VideoRipper extends AbstractRipper {
 
@@ -70,7 +71,7 @@ public abstract class VideoRipper extends AbstractRipper {
    }
 
    @Override
-    public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
+    public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
        return addURLToDownload(url, saveAs);
    }
 
src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java (new file, 137 lines)

@@ -0,0 +1,137 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONObject;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class BatoRipper extends AbstractHTMLRipper {
+
+    public BatoRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "bato";
+    }
+
+    @Override
+    public String getDomain() {
+        return "bato.to";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://bato.to/chapter/([\\d]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        // As this is just for quick queue support it doesn't matter what this if returns
+        p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "";
+        }
+        throw new MalformedURLException("Expected bato.to URL format: " +
+                "bato.to/chapter/ID - got " + url + " instead");
+    }
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select("div.main > div > a")) {
+            urlsToAddToQueue.add("https://" + getDomain() + elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException {
+        try {
+            // Attempt to use album title as GID
+            return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_");
+        } catch (IOException e) {
+            // Fall back to default album naming convention
+            logger.info("Unable to find title at " + url);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        Pattern p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return true;
+        }
+
+        p = Pattern.compile("https?://bato.to/chapter/([\\d]+)/?");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element script : doc.select("script")) {
+            if (script.data().contains("var images = ")) {
+                String s = script.data();
+                s = s.replaceAll("var seriesId = \\d+;", "");
+                s = s.replaceAll("var chapterId = \\d+;", "");
+                s = s.replaceAll("var pages = \\d+;", "");
+                s = s.replaceAll("var page = \\d+;", "");
+                s = s.replaceAll("var prevCha = null;", "");
+                s = s.replaceAll("var nextCha = \\.*;", "");
+                String json = s.replaceAll("var images = ", "").replaceAll(";", "");
+                logger.info(s);
+                JSONObject images = new JSONObject(json);
+                for (int i = 1; i < images.length() + 1; i++) {
+                    result.add(images.getString(Integer.toString(i)));
+                }
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java (new file)

@@ -0,0 +1,76 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class BlackbrickroadofozRipper extends AbstractHTMLRipper {
+
+    public BlackbrickroadofozRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "blackbrickroadofoz";
+    }
+
+    @Override
+    public String getDomain() {
+        return "blackbrickroadofoz.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://www.blackbrickroadofoz.com/comic/([a-zA-Z0-9_-]*)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected blackbrickroadofoz URL format: " +
+                "www.blackbrickroadofoz.com/comic/PAGE - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        sleep(1000);
+        Element elem = doc.select("div[id=topnav] > nav.cc-nav > a.cc-next").first();
+        if (elem == null) {
+            throw new IOException("No more pages");
+        }
+        String nextPage = elem.attr("href");
+        return Http.url(nextPage).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        Element elem = doc.select("div[id=cc-comicbody] > a > img[id=cc-comic]").first();
+        // The site doesn't return properly encoded urls, so we replace all spaces ( ) with %20
+        result.add(elem.attr("src").replaceAll(" ", "%20"));
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java

@@ -59,7 +59,7 @@ public class CfakeRipper extends AbstractHTMLRipper {
        String nextPage = elem.attr("href");
        // Sometimes this returns an empty string
        // This if stops that
-       if (nextPage == "") {
+       if (nextPage.equals("")) {
            return null;
        }
        else {
src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java

@@ -1,5 +1,9 @@
 package com.rarchives.ripme.ripper.rippers;
 
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Base64;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.MalformedURLException;
@@ -13,7 +17,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-
 import org.jsoup.Connection.Method;
 import org.jsoup.Connection.Response;
 import org.jsoup.Jsoup;
@@ -22,11 +25,6 @@ import org.jsoup.nodes.Element;
 import org.jsoup.safety.Whitelist;
 import org.jsoup.select.Elements;
 
-import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.utils.Base64;
-import com.rarchives.ripme.utils.Http;
-import com.rarchives.ripme.utils.Utils;
-
 public class DeviantartRipper extends AbstractHTMLRipper {
 
    private static final int PAGE_SLEEP_TIME = 3000,
@@ -108,19 +106,46 @@ public class DeviantartRipper extends AbstractHTMLRipper {
        throw new MalformedURLException("Expected URL format: http://username.deviantart.com/[/gallery/#####], got: " + url);
    }
 
+    /**
+     * Gets first page.
+     * Will determine if a login is supplied;
+     * if there is a login, then log in and add those login cookies.
+     * Otherwise, just bypass the age gate with an anonymous flag.
+     * @return
+     * @throws IOException
+     */
    @Override
    public Document getFirstPage() throws IOException {
-       // Login
-       try {
-           cookies = loginToDeviantart();
-       } catch (Exception e) {
-           logger.warn("Failed to login: ", e);
-           cookies.put("agegate_state","1"); // Bypasses the age gate
+
+       //Test to see if there is a login:
+       String username = Utils.getConfigString("deviantart.username", new String(Base64.decode("Z3JhYnB5")));
+       String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
+
+       if (username == null || password == null) {
+           logger.debug("No DeviantArt login provided.");
+           cookies.put("agegate_state","1"); // Bypasses the age gate
+       } else {
+           // Attempt Login
+           try {
+               cookies = loginToDeviantart();
+           } catch (IOException e) {
+               logger.warn("Failed to login: ", e);
+               cookies.put("agegate_state","1"); // Bypasses the age gate
+           }
        }
 
+
        return Http.url(this.url)
                .cookies(cookies)
                .get();
    }
 
+    /**
+     *
+     * @param page
+     * @param id
+     * @return
+     */
    private String jsonToImage(Document page, String id) {
        Elements js = page.select("script[type=\"text/javascript\"]");
        for (Element tag : js) {
@@ -444,4 +469,4 @@ public class DeviantartRipper extends AbstractHTMLRipper {
        // We are logged in, save the cookies
        return resp.cookies();
    }
 }
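The reworked getFirstPage() reads DeviantArt credentials from RipMe's config rather than always attempting a login; the Base64 literals simply decode to built-in fallback values passed as defaults to Utils.getConfigString(). To use your own account, the corresponding keys would go in the config file RipMe reads (typically rip.properties; the values below are placeholders):

```
deviantart.username = yourUsername
deviantart.password = yourPassword
```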
src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java (new file)

@@ -0,0 +1,84 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONArray;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class DynastyscansRipper extends AbstractHTMLRipper {
+
+    public DynastyscansRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "dynasty-scans";
+    }
+
+    @Override
+    public String getDomain() {
+        return "dynasty-scans.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://dynasty-scans.com/chapters/([\\S]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected dynasty-scans URL format: " +
+                "dynasty-scans.com/chapters/ID - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        Element elem = doc.select("a[id=next_link]").first();
+        if (elem == null || elem.attr("href").equals("#")) {
+            throw new IOException("No more pages");
+        }
+        return Http.url("https://dynasty-scans.com" + elem.attr("href")).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        String jsonText = null;
+        for (Element script : doc.select("script")) {
+            if (script.data().contains("var pages")) {
+                jsonText = script.data().replaceAll("var pages = ", "");
+                jsonText = jsonText.replaceAll("//<!\\[CDATA\\[", "");
+                jsonText = jsonText.replaceAll("//]]>", "");
+            }
+        }
+        JSONArray imageArray = new JSONArray(jsonText);
+        for (int i = 0; i < imageArray.length(); i++) {
+            result.add("https://dynasty-scans.com" + imageArray.getJSONObject(i).getString("image"));
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java

@@ -4,7 +4,6 @@ import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -127,9 +126,9 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                        image = getFullSizeImage(imageHref);
                        URL imageUrl = new URL(image);
                        if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
-                           addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "");
+                           addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "", null, true);
                        } else {
-                           addURLToDownload(imageUrl, getPrefixLong(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies);
+                           addURLToDownload(imageUrl, getPrefixLong(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "", null, true);
                        }
                        // X is our page index
                        x++;
src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java

@@ -86,7 +86,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
            throw new IOException("No more pages");
        }
        nextUrl = elem.attr("href");
-       if (nextUrl == "") {
+       if (nextUrl.equals("")) {
            throw new IOException("No more pages");
        }
        return Http.url("eroshae.com" + nextUrl).get();
@@ -119,7 +119,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
        for (Element img : imgs) {
            if (img.hasClass("album-image")) {
                String imageURL = img.attr("src");
-               imageURL = "https:" + imageURL;
+               imageURL = imageURL;
                URLs.add(imageURL);
            }
        }
@@ -129,7 +129,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
            if (vid.hasClass("album-video")) {
                Elements source = vid.getElementsByTag("source");
                String videoURL = source.first().attr("src");
-               URLs.add("https:" + videoURL);
+               URLs.add(videoURL);
            }
        }
        // Profile videos
@@ -148,7 +148,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
            if (vid.hasClass("album-video")) {
                Elements source = vid.getElementsByTag("source");
                String videoURL = source.first().attr("src");
-               URLs.add("https:" + videoURL);
+               URLs.add(videoURL);
            }
        }
    }
@@ -209,7 +209,6 @@ public class EroShareRipper extends AbstractHTMLRipper {
        for (Element img : imgs) {
            if (img.hasClass("album-image")) {
                String imageURL = img.attr("src");
-               imageURL = "https:" + imageURL;
                URLs.add(new URL(imageURL));
            }
        }
@@ -219,7 +218,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
            if (vid.hasClass("album-video")) {
                Elements source = vid.getElementsByTag("source");
                String videoURL = source.first().attr("src");
-               URLs.add(new URL("https:" + videoURL));
+               URLs.add(new URL(videoURL));
            }
        }
 
src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java

@@ -332,7 +332,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
            String[] fields = u.split("/");
            String prefix = getPrefix(index) + fields[fields.length - 3];
            File saveAs = new File(getWorkingDir() + File.separator + prefix + ".jpg");
-           addURLToDownload(url, saveAs, "", null);
+           addURLToDownload(url, saveAs, "", null, false);
        }
 
    }
src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java

@@ -13,7 +13,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import com.rarchives.ripme.utils.Utils;
-import org.jsoup.Connection.Method;
 import org.jsoup.Connection.Response;
 import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
@@ -23,7 +22,6 @@ import org.jsoup.select.Elements;
 
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.ripper.DownloadThreadPool;
-import com.rarchives.ripme.utils.Base64;
 import com.rarchives.ripme.utils.Http;
 
 public class FuraffinityRipper extends AbstractHTMLRipper {
@@ -162,10 +160,6 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
        if (!subdirectory.equals("")) {
            subdirectory = File.separator + subdirectory;
        }
-       int o = url.toString().lastIndexOf('/')-1;
-       String test = url.toString().substring(url.toString().lastIndexOf('/',o)+1);
-       test = test.replace("/",""); // This is probably not the best way to do this.
-       test = test.replace("\\",""); // CLOSE ENOUGH!
        saveFileAs = new File(
                workingDir.getCanonicalPath()
                + subdirectory
src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java (new file)

@@ -0,0 +1,61 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class GfycatporntubeRipper extends AbstractHTMLRipper {
+
+    public GfycatporntubeRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "gfycatporntube";
+    }
+
+    @Override
+    public String getDomain() {
+        return "gfycatporntube.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://gfycatporntube.com/([a-zA-Z1-9_-]*)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected gfycatporntube URL format: " +
+                "gfycatporntube.com/NAME - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        result.add(doc.select("source[id=mp4Source]").attr("src"));
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
@@ -31,41 +31,68 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
         return "hentai2read.com";
     }
 
     @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        logger.info("Page contains albums");
+        Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
+        Matcher mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select(".nav-chapters > li > div.media > a")) {
+            urlsToAddToQueue.add(elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
     public String getGID(URL url) throws MalformedURLException {
-        Pattern p = Pattern.compile("https://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
+        Pattern p = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/(\\d+)?/?");
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
-            return m.group(1);
+            return m.group(1) + "_" + m.group(2);
         }
         throw new MalformedURLException("Expected hentai2read.com URL format: " +
-                "hbrowse.com/COMICID - got " + url + " instead");
+                "hentai2read.com/COMICID - got " + url + " instead");
     }
 
     @Override
     public Document getFirstPage() throws IOException {
-        Document tempDoc;
-        // get the first page of the comic
-        if (url.toExternalForm().substring(url.toExternalForm().length() - 1).equals("/")) {
-            tempDoc = Http.url(url + "1").get();
-        } else {
-            tempDoc = Http.url(url + "/1").get();
-        }
-        for (Element el : tempDoc.select("ul.nav > li > a")) {
-            if (el.attr("href").startsWith("https://hentai2read.com/thumbnails/")) {
-                // Get the page with the thumbnails
-                return Http.url(el.attr("href")).get();
-            }
-        }
-        throw new IOException("Unable to get first page");
+        String thumbnailLink;
+        try {
+            // If the page contains albums we want to load the main page
+            if (pageContainsAlbums(url)) {
+                return Http.url(url).get();
+            }
+            Document tempDoc;
+            tempDoc = Http.url(url).get();
+            // Get the thumbnail page so we can rip all images without loading every page in the comic
+            thumbnailLink = tempDoc.select("div.col-xs-12 > div.reader-controls > div.controls-block > button > a").attr("href");
+            if (!thumbnailLink.equals("")) {
+                return Http.url(thumbnailLink).get();
+            } else {
+                return Http.url(tempDoc.select("a[data-original-title=Thumbnails]").attr("href")).get();
+            }
+        } catch (IOException e) {
+            throw new IOException("Unable to get first page");
+        }
     }
 
     @Override
     public String getAlbumTitle(URL url) throws MalformedURLException {
         try {
-            Document doc = getFirstPage();
-            String title = doc.select("span[itemprop=title]").text();
-            return getHost() + "_" + title;
+            return getHost() + "_" + getGID(url);
         } catch (Exception e) {
             // Fall back to default album naming convention
             logger.warn("Failed to get album title from " + url, e);
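
The three methods added above are the quick-queue hooks. A minimal sketch of how a driver is assumed to use them — the call order inside RipMe's ripper driver is an assumption here, not something this diff shows:

    // Hypothetical driver; the hook names come from the diff, the orchestration is assumed.
    void ripOrQueue(URL url, List<String> queue) throws IOException {
        Hentai2readRipper ripper = new Hentai2readRipper(url);
        if (ripper.hasQueueSupport() && ripper.pageContainsAlbums(url)) {
            // A series page: queue every chapter link instead of ripping inline.
            for (String albumUrl : ripper.getAlbumsToQueue(ripper.getFirstPage())) {
                queue.add(albumUrl);
            }
        } else {
            ripper.rip(); // a single chapter rips directly
        }
    }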
@@ -10,6 +10,7 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.jsoup.Connection.Method;
 import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
@@ -49,19 +50,61 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
 
     @Override
     public Document getFirstPage() throws IOException {
-        Response resp = Http.url("http://www.hentai-foundry.com/").response();
-        cookies = resp.cookies();
+        Response resp;
+        Document doc;
+
         resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
                 .referrer("http://www.hentai-foundry.com/")
                 .cookies(cookies)
                 .response();
         // The only cookie that seems to matter in getting around the age wall is the phpsession cookie
         cookies.putAll(resp.cookies());
-        sleep(500);
+
+        doc = resp.parse();
+        String csrf_token = doc.select("input[name=YII_CSRF_TOKEN]")
+                .first().attr("value");
+        if (csrf_token != null) {
+            Map<String,String> data = new HashMap<>();
+            data.put("YII_CSRF_TOKEN"  , csrf_token);
+            data.put("rating_nudity"   , "3");
+            data.put("rating_violence" , "3");
+            data.put("rating_profanity", "3");
+            data.put("rating_racism"   , "3");
+            data.put("rating_sex"      , "3");
+            data.put("rating_spoilers" , "3");
+            data.put("rating_yaoi"     , "1");
+            data.put("rating_yuri"     , "1");
+            data.put("rating_teen"     , "1");
+            data.put("rating_guro"     , "1");
+            data.put("rating_furry"    , "1");
+            data.put("rating_beast"    , "1");
+            data.put("rating_male"     , "1");
+            data.put("rating_female"   , "1");
+            data.put("rating_futa"     , "1");
+            data.put("rating_other"    , "1");
+            data.put("rating_scat"     , "1");
+            data.put("rating_incest"   , "1");
+            data.put("rating_rape"     , "1");
+            data.put("filter_media"    , "A");
+            data.put("filter_order"    , "date_new");
+            data.put("filter_type"     , "0");
+
+            resp = Http.url("http://www.hentai-foundry.com/site/filters")
+                    .referrer("http://www.hentai-foundry.com/")
+                    .cookies(cookies)
+                    .data(data)
+                    .method(Method.POST)
+                    .response();
+            cookies.putAll(resp.cookies());
+        }
+        else {
+            logger.info("unable to find csrf_token and set filter");
+        }
+
         resp = Http.url(url)
                 .referrer("http://www.hentai-foundry.com/")
                 .cookies(cookies)
                 .response();
         cookies.putAll(resp.cookies());
         return resp.parse();
     }
@@ -74,12 +117,16 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
         }
         Elements els = doc.select("li.next > a");
         Element first = els.first();
-        String nextURL = first.attr("href");
-        nextURL = "http://www.hentai-foundry.com" + nextURL;
-        return Http.url(nextURL)
-                .referrer(url)
-                .cookies(cookies)
-                .get();
+        try {
+            String nextURL = first.attr("href");
+            nextURL = "http://www.hentai-foundry.com" + nextURL;
+            return Http.url(nextURL)
+                    .referrer(url)
+                    .cookies(cookies)
+                    .get();
+        } catch (NullPointerException e) {
+            throw new IOException("No more pages");
+        }
     }
 
     @Override
@@ -97,13 +144,6 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
         }
         Document imagePage;
         try {
-            Response resp = Http.url("http://www.hentai-foundry.com/").response();
-            cookies = resp.cookies();
-            resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
-                    .referrer("http://www.hentai-foundry.com/")
-                    .cookies(cookies)
-                    .response();
-            cookies.putAll(resp.cookies());
-
             logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
             imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
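
The try/catch added to getNextPage above leans on els.first() returning null once the last page is reached. An explicit null check would express the same termination condition without catching NullPointerException; a sketch, not what the commit ships:

    Element first = doc.select("li.next > a").first();
    if (first == null) {
        throw new IOException("No more pages");
    }
    String nextURL = "http://www.hentai-foundry.com" + first.attr("href");
    return Http.url(nextURL)
            .referrer(url)
            .cookies(cookies)
            .get();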
@@ -0,0 +1,73 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class HitomiRipper extends AbstractHTMLRipper {
+
+    String galleryId = "";
+
+    public HitomiRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hitomi";
+    }
+
+    @Override
+    public String getDomain() {
+        return "hitomi.la";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://hitomi.la/galleries/([\\d]+).html");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            galleryId = m.group(1);
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected hitomi URL format: " +
+                "https://hitomi.la/galleries/ID.html - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // if we go to /GALLERYID.js we get a nice json array of all images in the gallery
+        return Http.url(new URL(url.toExternalForm().replaceAll(".html", ".js"))).ignoreContentType().get();
+    }
+
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        String json = doc.text().replaceAll("var galleryinfo =", "");
+        logger.info(json);
+        JSONArray json_data = new JSONArray(json);
+        for (int i = 0; i < json_data.length(); i++) {
+            result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));
+        }
+
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
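
getURLsFromPage assumes the /GALLERYID.js endpoint returns a JavaScript assignment whose right-hand side is a JSON array. The payload below is illustrative (file names made up) and shows only the field the parser actually reads:

    // Illustrative payload:  var galleryinfo = [{"name":"001.jpg"},{"name":"002.jpg"}]
    String body = "var galleryinfo = [{\"name\":\"001.jpg\"},{\"name\":\"002.jpg\"}]";
    JSONArray images = new JSONArray(body.replaceAll("var galleryinfo =", ""));
    for (int i = 0; i < images.length(); i++) {
        // getURLsFromPage only reads the "name" field of each entry
        System.out.println(images.getJSONObject(i).getString("name"));
    }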
@@ -0,0 +1,91 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import javax.print.Doc;
+
+public class HypnohubRipper extends AbstractHTMLRipper {
+
+    public HypnohubRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hypnohub";
+    }
+
+    @Override
+    public String getDomain() {
+        return "hypnohub.net";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/([\\S]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1) + "_" + m.group(2);
+        }
+        throw new MalformedURLException("Expected hypnohub URL format: " +
+                "hypnohub.net/pool/show/ID - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    private String ripPost(String url) throws IOException {
+        logger.info(url);
+        Document doc = Http.url(url).get();
+        return "https:" + doc.select("img.image").attr("src");
+    }
+
+    private String ripPost(Document doc) {
+        logger.info(url);
+        return "https:" + doc.select("img.image").attr("src");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        if (url.toExternalForm().contains("/pool")) {
+            for (Element el : doc.select("ul[id=post-list-posts] > li > div > a.thumb")) {
+                try {
+                    result.add(ripPost("https://hypnohub.net" + el.attr("href")));
+                } catch (IOException e) {
+                    return result;
+                }
+            }
+        } else if (url.toExternalForm().contains("/post")) {
+            result.add(ripPost(doc));
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
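
getGID tries the bare pool/post form first and falls back to the form with a trailing slug. With invented IDs, the two URL shapes and the GIDs they produce:

    // "https://hypnohub.net/pool/show/1234"            -> GID "1234"
    // "https://hypnohub.net/pool/show/1234/some_pool"  -> GID "1234_some_pool"
    Pattern p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/?$");
    Matcher m = p.matcher("https://hypnohub.net/pool/show/1234");
    System.out.println(m.matches() ? m.group(1) : "no match"); // 1234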
@@ -1,5 +1,9 @@
 package com.rarchives.ripme.ripper.rippers;
 
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
@@ -7,16 +11,10 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
-
-import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.ripper.DownloadThreadPool;
-import com.rarchives.ripme.utils.Http;
-import com.rarchives.ripme.utils.Utils;
 
 public class ImagebamRipper extends AbstractHTMLRipper {
 
     // Current HTML document
@@ -71,7 +69,7 @@ public class ImagebamRipper extends AbstractHTMLRipper {
     public Document getNextPage(Document doc) throws IOException {
         // Find next page
         Elements hrefs = doc.select("a.pagination_current + a.pagination_link");
-        if (hrefs.size() == 0) {
+        if (hrefs.isEmpty()) {
             throw new IOException("No more pages");
         }
         String nextUrl = "http://www.imagebam.com" + hrefs.first().attr("href");
@@ -121,8 +119,8 @@ public class ImagebamRipper extends AbstractHTMLRipper {
      * Handles case when site has IP-banned the user.
      */
     private class ImagebamImageThread extends Thread {
-        private URL url;
-        private int index;
+        private URL url; //link to "image page"
+        private int index; //index in album
 
         ImagebamImageThread(URL url, int index) {
             super();
@@ -134,28 +132,43 @@ public class ImagebamRipper extends AbstractHTMLRipper {
         public void run() {
             fetchImage();
         }
 
+        /**
+         * Rips useful image from "image page"
+         */
         private void fetchImage() {
             try {
                 Document doc = Http.url(url).get();
                 // Find image
-                Elements images = doc.select(".image-container img");
-                if (images.size() == 0) {
+                Elements metaTags = doc.getElementsByTag("meta");
+
+                String imgsrc = "";//initialize, so no NullPointerExceptions should ever happen.
+
+                for (Element metaTag: metaTags) {
+                    //the direct link to the image seems to always be linked in the <meta> part of the html.
+                    if (metaTag.attr("property").equals("og:image")) {
+                        imgsrc = metaTag.attr("content");
+                        logger.info("Found URL " + imgsrc);
+                        break;//only one (useful) image possible for an "image page".
+                    }
+                }
+
+                //for debug, or something goes wrong.
+                if (imgsrc.isEmpty()) {
                     logger.warn("Image not found at " + this.url);
                     return;
                 }
-                Element image = images.first();
-                String imgsrc = image.attr("src");
-                logger.info("Found URL " + imgsrc);
+
                 // Provide prefix and let the AbstractRipper "guess" the filename
                 String prefix = "";
                 if (Utils.getConfigBoolean("download.save_order", true)) {
                     prefix = String.format("%03d_", index);
                 }
+
                 addURLToDownload(new URL(imgsrc), prefix);
             } catch (IOException e) {
                 logger.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
     }
 }
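
The meta-tag loop in fetchImage can be collapsed into a single jsoup attribute selector; a compact sketch of the same og:image lookup, equivalent under the diff's assumption that an image page carries at most one og:image tag:

    String imgsrc = doc.select("meta[property=og:image]").attr("content");
    if (imgsrc.isEmpty()) {
        logger.warn("Image not found at " + this.url);
        return;
    }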
@@ -36,6 +36,7 @@ public class ImgurRipper extends AlbumRipper {
         USER,
         USER_ALBUM,
         USER_IMAGES,
+        SINGLE_IMAGE,
         SERIES_OF_IMAGES,
         SUBREDDIT
     }
@@ -155,34 +156,48 @@ public class ImgurRipper extends AlbumRipper {
     @Override
     public void rip() throws IOException {
         switch (albumType) {
             case ALBUM:
                 // Fall-through
             case USER_ALBUM:
                 logger.info("Album type is USER_ALBUM");
                 // Don't call getAlbumTitle(this.url) with this
                 // as it seems to cause the album to be downloaded to a subdir.
                 ripAlbum(this.url);
                 break;
             case SERIES_OF_IMAGES:
                 logger.info("Album type is SERIES_OF_IMAGES");
                 ripAlbum(this.url);
                 break;
+            case SINGLE_IMAGE:
+                logger.info("Album type is SINGLE_IMAGE");
+                ripSingleImage(this.url);
+                break;
             case USER:
                 logger.info("Album type is USER");
                 ripUserAccount(url);
                 break;
             case SUBREDDIT:
                 logger.info("Album type is SUBREDDIT");
                 ripSubreddit(url);
                 break;
             case USER_IMAGES:
                 logger.info("Album type is USER_IMAGES");
                 ripUserImages(url);
                 break;
         }
         waitForThreads();
     }
 
+    private void ripSingleImage(URL url) throws IOException {
+        String strUrl = url.toExternalForm();
+        Document document = getDocument(strUrl);
+        Matcher m = getEmbeddedJsonMatcher(document);
+        if (m.matches()) {
+            JSONObject json = new JSONObject(m.group(1)).getJSONObject("image");
+            addURLToDownload(extractImageUrlFromJson(json), "");
+        }
+    }
+
     private void ripAlbum(URL url) throws IOException {
         ripAlbum(url, "");
     }
@@ -257,38 +272,16 @@ public class ImgurRipper extends AlbumRipper {
             strUrl += "/all";
         }
         logger.info(" Retrieving " + strUrl);
-        Document doc = Jsoup.connect(strUrl)
-                .userAgent(USER_AGENT)
-                .timeout(10 * 1000)
-                .maxBodySize(0)
-                .get();
+        Document doc = getDocument(strUrl);
         // Try to use embedded JSON to retrieve images
-        Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL);
-        Matcher m = p.matcher(doc.body().html());
+        Matcher m = getEmbeddedJsonMatcher(doc);
         if (m.matches()) {
             try {
-                ImgurAlbum imgurAlbum = new ImgurAlbum(url);
                 JSONObject json = new JSONObject(m.group(1));
-                JSONArray images = json.getJSONObject("image")
+                JSONArray jsonImages = json.getJSONObject("image")
                         .getJSONObject("album_images")
                         .getJSONArray("images");
-                int imagesLength = images.length();
-                for (int i = 0; i < imagesLength; i++) {
-                    JSONObject image = images.getJSONObject(i);
-                    String ext = image.getString("ext");
-                    if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
-                        ext = ".mp4";
-                    }
-                    URL imageURL = new URL(
-                            "http://i.imgur.com/"
-                                    + image.getString("hash")
-                                    + ext);
-                    ImgurImage imgurImage = new ImgurImage(imageURL);
-                    imgurImage.extension = ext;
-                    imgurAlbum.addImage(imgurImage);
-                }
-                return imgurAlbum;
+                return createImgurAlbumFromJsonArray(url, jsonImages);
             } catch (JSONException e) {
                 logger.debug("Error while parsing JSON at " + url + ", continuing", e);
             }
@@ -330,6 +323,44 @@ public class ImgurRipper extends AlbumRipper {
         return imgurAlbum;
     }
 
+    private static Matcher getEmbeddedJsonMatcher(Document doc) {
+        Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL);
+        return p.matcher(doc.body().html());
+    }
+
+    private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException {
+        ImgurAlbum imgurAlbum = new ImgurAlbum(url);
+        int imagesLength = jsonImages.length();
+        for (int i = 0; i < imagesLength; i++) {
+            JSONObject jsonImage = jsonImages.getJSONObject(i);
+            imgurAlbum.addImage(createImgurImageFromJson(jsonImage));
+        }
+        return imgurAlbum;
+    }
+
+    private static ImgurImage createImgurImageFromJson(JSONObject json) throws MalformedURLException {
+        return new ImgurImage(extractImageUrlFromJson(json));
+    }
+
+    private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException {
+        String ext = json.getString("ext");
+        if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
+            ext = ".mp4";
+        }
+        return new URL(
+                "http://i.imgur.com/"
+                        + json.getString("hash")
+                        + ext);
+    }
+
+    private static Document getDocument(String strUrl) throws IOException {
+        return Jsoup.connect(strUrl)
+                .userAgent(USER_AGENT)
+                .timeout(10 * 1000)
+                .maxBodySize(0)
+                .get();
+    }
+
     /**
      * Rips all albums in an imgur user's account.
      * @param url
@@ -507,6 +538,13 @@ public class ImgurRipper extends AlbumRipper {
             this.url = new URL("http://imgur.com/r/" + subreddit + "/" + gid);
             return "r_" + subreddit + "_" + gid;
         }
+        p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9]{5,})$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Single imgur image
+            albumType = ALBUM_TYPE.SINGLE_IMAGE;
+            return m.group(m.groupCount());
+        }
         p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9,]{5,}).*$");
         m = p.matcher(url.toExternalForm());
         if (m.matches()) {
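
The new SINGLE_IMAGE pattern matches only when nothing follows the image id, which keeps it from swallowing the album and comma-separated URLs that the next pattern handles. A quick check with an invented id:

    Pattern p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9]{5,})$");
    Matcher m = p.matcher("https://imgur.com/AbcDe12"); // hypothetical image id
    if (m.matches()) {
        // groupCount() is 2, so this resolves to the id capture group
        System.out.println(m.group(m.groupCount())); // AbcDe12
    }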
@@ -3,6 +3,7 @@ package com.rarchives.ripme.ripper.rippers;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.io.UnsupportedEncodingException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
@@ -12,27 +13,38 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.security.*;
 
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
 
-import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
 import com.rarchives.ripme.utils.Http;
 
+import org.jsoup.Connection;
+import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ui.RipStatusMessage;
 import com.rarchives.ripme.utils.Utils;
+import java.util.HashMap;
 
 
-public class InstagramRipper extends AbstractHTMLRipper {
+public class InstagramRipper extends AbstractJSONRipper {
     String nextPageID = "";
     private String qHash;
     private boolean rippingTag = false;
     private String tagName;
 
     private String userID;
+    private String rhx_gis = null;
+    private String csrftoken;
+    // Run into a weird issue with Jsoup cutting some json pages in half, this is a work around
+    // see https://github.com/RipMeApp/ripme/issues/601
+    private String workAroundJsonString;
+
+
 
     public InstagramRipper(URL url) throws IOException {
         super(url);
@@ -65,11 +77,9 @@ public class InstagramRipper extends AbstractHTMLRipper {
         return url.replaceAll("/[A-Z0-9]{8}/", "/");
     }
 
-    private List<String> getPostsFromSinglePage(Document Doc) {
+    private List<String> getPostsFromSinglePage(JSONObject json) {
         List<String> imageURLs = new ArrayList<>();
         JSONArray datas;
-        try {
-            JSONObject json = getJSONFromPage(Doc);
         if (json.getJSONObject("entry_data").getJSONArray("PostPage")
                 .getJSONObject(0).getJSONObject("graphql").getJSONObject("shortcode_media")
                 .has("edge_sidecar_to_children")) {
@@ -95,10 +105,6 @@ public class InstagramRipper extends AbstractHTMLRipper {
             }
         }
         return imageURLs;
-        } catch (IOException e) {
-            logger.error("Unable to get JSON from page " + url.toExternalForm());
-            return null;
-        }
     }
 
     @Override
@@ -176,11 +182,14 @@ public class InstagramRipper extends AbstractHTMLRipper {
     }
 
     @Override
-    public Document getFirstPage() throws IOException {
-        Document p = Http.url(url).get();
+    public JSONObject getFirstPage() throws IOException {
+        Connection.Response resp = Http.url(url).response();
+        logger.info(resp.cookies());
+        csrftoken = resp.cookie("csrftoken");
+        Document p = resp.parse();
         // Get the query hash so we can download the next page
         qHash = getQHash(p);
-        return p;
+        return getJSONFromPage(p);
     }
 
     private String getVideoFromPage(String videoID) {
@@ -224,16 +233,13 @@ public class InstagramRipper extends AbstractHTMLRipper {
     }
 
     @Override
-    public List<String> getURLsFromPage(Document doc) {
+    public List<String> getURLsFromJSON(JSONObject json) {
         List<String> imageURLs = new ArrayList<>();
-        JSONObject json = new JSONObject();
-        try {
-            json = getJSONFromPage(doc);
-        } catch (IOException e) {
-            logger.warn("Unable to exact json from page");
+        // get the rhx_gis value so we can get the next page later on
+        if (rhx_gis == null) {
+            rhx_gis = json.getString("rhx_gis");
         }
 
+
         if (!url.toExternalForm().contains("/p/")) {
             JSONArray datas = new JSONArray();
             if (!rippingTag) {
@@ -268,7 +274,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
                 if (data.getString("__typename").equals("GraphSidecar")) {
                     try {
                         Document slideShowDoc = Http.url(new URL("https://www.instagram.com/p/" + data.getString("shortcode"))).get();
-                        List<String> toAdd = getPostsFromSinglePage(slideShowDoc);
+                        List<String> toAdd = getPostsFromSinglePage(getJSONFromPage(slideShowDoc));
                         for (int slideShowInt = 0; slideShowInt < toAdd.size(); slideShowInt++) {
                             addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode"));
                         }
@@ -284,9 +290,9 @@ public class InstagramRipper extends AbstractHTMLRipper {
                     if (imageURLs.size() == 0) {
                         // We add this one item to the array because either wise
                         // the ripper will error out because we returned an empty array
-                        imageURLs.add(getOriginalUrl(data.getString("thumbnail_src")));
+                        imageURLs.add(getOriginalUrl(data.getString("display_url")));
                     }
-                    addURLToDownload(new URL(getOriginalUrl(data.getString("thumbnail_src"))), image_date);
+                    addURLToDownload(new URL(data.getString("display_url")), image_date);
                 } else {
                     if (!Utils.getConfigBoolean("instagram.download_images_only", false)) {
                         addURLToDownload(new URL(getVideoFromPage(data.getString("shortcode"))), image_date);
@@ -307,23 +313,52 @@ public class InstagramRipper extends AbstractHTMLRipper {
 
         } else { // We're ripping from a single page
             logger.info("Ripping from single page");
-            imageURLs = getPostsFromSinglePage(doc);
+            imageURLs = getPostsFromSinglePage(json);
         }
 
         return imageURLs;
     }
 
+    private String getIGGis(String variables) {
+        String stringToMD5 = rhx_gis + ":" + variables;
+        logger.debug("String to md5 is \"" + stringToMD5 + "\"");
+        try {
+            byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
+            MessageDigest md = MessageDigest.getInstance("MD5");
+            byte[] hash = md.digest(bytesOfMessage);
+            StringBuffer sb = new StringBuffer();
+            for (int i = 0; i < hash.length; ++i) {
+                sb.append(Integer.toHexString((hash[i] & 0xFF) | 0x100).substring(1,3));
+            }
+            return sb.toString();
+        } catch(UnsupportedEncodingException e) {
+            return null;
+        } catch(NoSuchAlgorithmException e) {
+            return null;
+        }
+    }
+
     @Override
-    public Document getNextPage(Document doc) throws IOException {
-        Document toreturn;
+    public JSONObject getNextPage(JSONObject json) throws IOException {
+        JSONObject toreturn;
+        java.util.Map<String, String> cookies = new HashMap<String, String>();
+        // This shouldn't be hardcoded and will break one day
+        cookies.put("ig_pr", "1");
+        cookies.put("csrftoken", csrftoken);
         if (!nextPageID.equals("") && !isThisATest()) {
             if (rippingTag) {
                 try {
                     sleep(2500);
-                    toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
-                            "&variables={\"tag_name\":\"" + tagName + "\",\"first\":4,\"after\":\"" + nextPageID + "\"}").ignoreContentType().get();
+                    String vars = "{\"tag_name\":\"" + tagName + "\",\"first\":4,\"after\":\"" + nextPageID + "\"}";
+                    String ig_gis = getIGGis(vars);
+                    toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
+                            "&variables=" + vars, ig_gis);
                     // Sleep for a while to avoid a ban
-                    logger.info(toreturn.html());
+                    logger.info(toreturn);
+                    if (!pageHasImages(toreturn)) {
+                        throw new IOException("No more pages");
+                    }
                     return toreturn;
 
                 } catch (IOException e) {
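
The hex loop in getIGGis uses (hash[i] & 0xFF) | 0x100 to force a leading 1 bit so that substring(1, 3) always yields two zero-padded hex digits. A self-contained check against the published MD5 test vector for "abc":

    import java.security.MessageDigest;

    public class Md5HexCheck {
        public static void main(String[] args) throws Exception {
            MessageDigest md = MessageDigest.getInstance("MD5");
            byte[] hash = md.digest("abc".getBytes("UTF-8"));
            StringBuilder sb = new StringBuilder();
            for (byte b : hash) {
                // (b & 0xFF) | 0x100 guarantees three hex digits, so substring(1, 3) is always zero-padded
                sb.append(Integer.toHexString((b & 0xFF) | 0x100).substring(1, 3));
            }
            // Prints the published test vector: 900150983cd24fb0d6963f7d28e17f72
            System.out.println(sb);
        }
    }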
@@ -334,8 +369,11 @@ public class InstagramRipper extends AbstractHTMLRipper {
                 try {
                     // Sleep for a while to avoid a ban
                     sleep(2500);
-                    toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" +
-                            "{\"id\":\"" + userID + "\",\"first\":100,\"after\":\"" + nextPageID + "\"}").ignoreContentType().get();
+                    String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
+                    String ig_gis = getIGGis(vars);
+                    logger.info(ig_gis);
+
+                    toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis);
                     if (!pageHasImages(toreturn)) {
                         throw new IOException("No more pages");
                     }
@@ -353,9 +391,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
         addURLToDownload(url);
     }
 
-    private boolean pageHasImages(Document doc) {
-        logger.info("BAD DATA: " + stripHTMLTags(doc.html()));
-        JSONObject json = new JSONObject(stripHTMLTags(doc.html()));
+    private boolean pageHasImages(JSONObject json) {
         int numberOfImages = json.getJSONObject("data").getJSONObject("user")
                 .getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges").length();
         if (numberOfImages == 0) {
@@ -364,6 +400,34 @@ public class InstagramRipper extends AbstractHTMLRipper {
         return true;
     }
 
+    private JSONObject getPage(String url, String ig_gis) {
+        StringBuilder sb = new StringBuilder();
+        try {
+            // We can't use Jsoup here because it won't download a non-html file larger than a MB
+            // even if you set maxBodySize to 0
+            URLConnection connection = new URL(url).openConnection();
+            connection.setRequestProperty("User-Agent", USER_AGENT);
+            connection.setRequestProperty("x-instagram-gis", ig_gis);
+            BufferedReader in = new BufferedReader(new InputStreamReader(connection.getInputStream()));
+            String line;
+            while ((line = in.readLine()) != null) {
+                sb.append(line);
+            }
+            in.close();
+            workAroundJsonString = sb.toString();
+            return new JSONObject(sb.toString());
+        } catch (MalformedURLException e) {
+            logger.info("Unable to get query_hash, " + url + " is a malformed URL");
+            return null;
+        } catch (IOException e) {
+            logger.info("Unable to get query_hash");
+            logger.info(e.getMessage());
+            return null;
+        }
+    }
+
     private String getQHash(Document doc) {
         String jsFileURL = "https://www.instagram.com" + doc.select("link[rel=preload]").attr("href");
         StringBuilder sb = new StringBuilder();
@@ -393,6 +457,12 @@ public class InstagramRipper extends AbstractHTMLRipper {
             if (m.find()) {
                 return m.group(1);
             }
+            jsP = Pattern.compile("n.pagination:n},queryId:.([a-zA-Z0-9]+).");
+            m = jsP.matcher(sb.toString());
+            if (m.find()) {
+                return m.group(1);
+            }
+
         } else {
             Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
             Matcher m = jsP.matcher(sb.toString());
@@ -400,7 +470,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
                 return m.group(1);
             }
         }
-        logger.info("Could not find query_hash on " + jsFileURL);
+        logger.error("Could not find query_hash on " + jsFileURL);
         return null;
 
     }
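
One robustness note on getPage: the BufferedReader is only closed on the success path, so the stream leaks if readLine throws. try-with-resources closes it on every path; a sketch under the same assumptions as the diff:

    URLConnection connection = new URL(url).openConnection();
    connection.setRequestProperty("User-Agent", USER_AGENT);
    connection.setRequestProperty("x-instagram-gis", ig_gis);
    StringBuilder sb = new StringBuilder();
    // the reader is closed even when readLine throws
    try (BufferedReader in = new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
        String line;
        while ((line = in.readLine()) != null) {
            sb.append(line);
        }
    }
    return new JSONObject(sb.toString());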
@@ -50,7 +50,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
         // This is here for pages with mp4s instead of images
         String video_image = "";
         video_image = page.select("div > video > source").attr("src");
-        if (video_image != "") {
+        if (!video_image.equals("")) {
             urls.add(video_image);
         }
         return urls;
@@ -0,0 +1,116 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import javax.print.Doc;
+
+public class ManganeloRipper extends AbstractHTMLRipper {
+
+    public ManganeloRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "manganelo";
+    }
+
+    @Override
+    public String getDomain() {
+        return "manganelo.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://manganelo.com/manga/([\\S]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        p = Pattern.compile("http://manganelo.com/chapter/([\\S]+)/([\\S]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected manganelo URL format: " +
+                "/manganelo.com/manga/ID - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        Element elem = doc.select("div.btn-navigation-chap > a.back").first();
+        if (elem == null) {
+            throw new IOException("No more pages");
+        } else {
+            return Http.url(elem.attr("href")).get();
+        }
+    }
+
+    private List<String> getURLsFromChap(String url) {
+        logger.debug("Getting urls from " + url);
+        List<String> result = new ArrayList<>();
+        try {
+            Document doc = Http.url(url).get();
+            for (Element el : doc.select("img.img_content")) {
+                result.add(el.attr("src"));
+            }
+            return result;
+        } catch (IOException e) {
+            return null;
+        }
+
+    }
+
+    private List<String> getURLsFromChap(Document doc) {
+        logger.debug("Getting urls from " + url);
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("img.img_content")) {
+            result.add(el.attr("src"));
+        }
+        return result;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        List<String> urlsToGrab = new ArrayList<>();
+        if (url.toExternalForm().contains("/manga/")) {
+            for (Element el : doc.select("div.chapter-list > div.row > span > a")) {
+                urlsToGrab.add(el.attr("href"));
+            }
+            Collections.reverse(urlsToGrab);
+
+            for (String url : urlsToGrab) {
+                result.addAll(getURLsFromChap(url));
+            }
+        } else if (url.toExternalForm().contains("/chapter/")) {
+            result.addAll(getURLsFromChap(doc));
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
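
One caveat in the new ripper: getURLsFromChap(String) returns null when a chapter fetch fails, and getURLsFromPage feeds that straight into result.addAll(...), which throws NullPointerException. Returning an empty list is the defensive variant sketched below (not what the commit ships):

    private List<String> getURLsFromChap(String url) {
        logger.debug("Getting urls from " + url);
        List<String> result = new ArrayList<>();
        try {
            Document doc = Http.url(url).get();
            for (Element el : doc.select("img.img_content")) {
                result.add(el.attr("src"));
            }
        } catch (IOException e) {
            // fall through and return whatever was collected (possibly empty) instead of null
            logger.warn("Failed to get chapter " + url, e);
        }
        return result;
    }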
@@ -0,0 +1,64 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class ModelxRipper extends AbstractHTMLRipper {
+
+    public ModelxRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "modelx";
+    }
+
+    @Override
+    public String getDomain() {
+        return "modelx.org";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^.*modelx.org/.*/(.+)$");
+        Matcher m = p.matcher(url.toExternalForm());
+
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException("Expected URL format: http://www.modelx.org/[category (one or more)]/xxxxx got: " + url);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        return Http.url(url).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> result = new ArrayList<>();
+
+        for (Element el : page.select(".gallery-icon > a")) {
+            result.add(el.attr("href"));
+        }
+
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
@@ -34,21 +34,18 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
         Pattern p = Pattern.compile("^https?://myhentaicomics.com/index.php/([a-zA-Z0-9-]*)/?$");
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
-            isTag = false;
             return m.group(1);
         }
 
         Pattern pa = Pattern.compile("^https?://myhentaicomics.com/index.php/search\\?q=([a-zA-Z0-9-]*)([a-zA-Z0-9=&]*)?$");
         Matcher ma = pa.matcher(url.toExternalForm());
         if (ma.matches()) {
-            isTag = true;
             return ma.group(1);
         }
 
         Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");
         Matcher mat = pat.matcher(url.toExternalForm());
         if (mat.matches()) {
-            isTag = true;
             return mat.group(1);
         }
 
@@ -56,6 +53,37 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
                 "myhentaicomics.com/index.php/albumName - got " + url + " instead");
     }
 
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern pa = Pattern.compile("^https?://myhentaicomics.com/index.php/search\\?q=([a-zA-Z0-9-]*)([a-zA-Z0-9=&]*)?$");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        if (ma.matches()) {
+            return true;
+        }
+
+        Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");
+        Matcher mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            isTag = true;
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select(".g-album > a")) {
+            urlsToAddToQueue.add(getDomain() + elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
     @Override
     public Document getFirstPage() throws IOException {
         // "url" is an instance field of the superclass
@@ -73,7 +101,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
         if (m.matches()) {
             nextUrl = "http://myhentaicomics.com" + m.group(0);
         }
-        if (nextUrl == "") {
+        if (nextUrl.equals("")) {
             throw new IOException("No more pages");
         }
         // Sleep for half a sec to avoid getting IP banned
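
The search and tag patterns now appear in both getGID and pageContainsAlbums. Hoisting them into shared constants would keep the two copies from drifting; a refactoring sketch, not part of the commit:

    private static final Pattern SEARCH_PATTERN = Pattern.compile(
            "^https?://myhentaicomics.com/index.php/search\\?q=([a-zA-Z0-9-]*)([a-zA-Z0-9=&]*)?$");
    private static final Pattern TAG_PATTERN = Pattern.compile(
            "^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");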
@ -81,161 +109,11 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
|
|||||||
return Http.url(nextUrl).get();
|
return Http.url(nextUrl).get();
|
||||||
}
|
}
|
||||||
|
|
||||||
// This replaces getNextPage when downloading from searchs and tags
|
|
||||||
private List<String> getNextAlbumPage(String pageUrl) {
|
|
||||||
List<String> albumPagesList = new ArrayList<>();
|
|
||||||
int pageNumber = 1;
|
|
||||||
albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
|
|
||||||
while (true) {
|
|
||||||
String urlToGet = "http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber);
|
|
||||||
Document nextAlbumPage;
|
|
||||||
try {
|
|
||||||
logger.info("Grabbing " + urlToGet);
|
|
||||||
nextAlbumPage = Http.url(urlToGet).get();
|
|
||||||
} catch (IOException e) {
|
|
||||||
logger.warn("Failed to log link in Jsoup");
|
|
||||||
nextAlbumPage = null;
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
-            Element elem = nextAlbumPage.select("a.ui-icon-right").first();
-            String nextPage = elem.attr("href");
-            pageNumber = pageNumber + 1;
-            if (nextPage == "") {
-                logger.info("Got " + pageNumber + " pages");
-                break;
-            }
-            else {
-                logger.info(nextPage);
-                albumPagesList.add(nextPage);
-                logger.info("Adding " + nextPage);
-            }
-        }
-        return albumPagesList;
-    }
-
-    private List<String> getAlbumsFromPage(String url) {
-        List<String> pagesToRip;
-        List<String> result = new ArrayList<>();
-        logger.info("Running getAlbumsFromPage");
-        Document doc;
-        try {
-            doc = Http.url("http://myhentaicomics.com" + url).get();
-        } catch (IOException e) {
-            logger.warn("Failed to log link in Jsoup");
-            doc = null;
-            e.printStackTrace();
-        }
-        // This for goes over every album on the page
-        for (Element elem : doc.select("li.g-album > a")) {
-            String link = elem.attr("href");
-            logger.info("Grabbing album " + link);
-            pagesToRip = getNextAlbumPage(link);
-            logger.info(pagesToRip);
-            for (String element : pagesToRip) {
-                Document album_doc;
-                try {
-                    logger.info("grabbing " + element + " with jsoup");
-                    boolean startsWithHttp = element.startsWith("http://");
-                    if (!startsWithHttp) {
-                        album_doc = Http.url("http://myhentaicomics.com/" + element).get();
-                    }
-                    else {
-                        album_doc = Http.url(element).get();
-                    }
-                } catch (IOException e) {
-                    logger.warn("Failed to log link in Jsoup");
-                    album_doc = null;
-                    e.printStackTrace();
-                }
-                for (Element el : album_doc.select("img")) {
-                    String imageSource = el.attr("src");
-                    // This bool is here so we don't try and download the site logo
-                    if (!imageSource.startsWith("http://")) {
-                        // We replace thumbs with resizes so we can the full sized images
-                        imageSource = imageSource.replace("thumbs", "resizes");
-                        String url_string = "http://myhentaicomics.com/" + imageSource;
-                        url_string = url_string.replace("%20", "_");
-                        url_string = url_string.replace("%27", "");
-                        url_string = url_string.replace("%28", "_");
-                        url_string = url_string.replace("%29", "_");
-                        url_string = url_string.replace("%2C", "_");
-                        if (isTag) {
-                            logger.info("Downloading from a tag or search");
-                            try {
-                                sleep(500);
-                                result.add("http://myhentaicomics.com/" + imageSource);
-                                addURLToDownload(new URL("http://myhentaicomics.com/" + imageSource), "", url_string.split("/")[6]);
-                            }
-                            catch (MalformedURLException e) {
-                                logger.warn("Malformed URL");
-                                e.printStackTrace();
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        return result;
-    }
-
-    private List<String> getListOfPages(Document doc) {
-        List<String> pages = new ArrayList<>();
-        // Get the link from the last button
-        String nextPageUrl = doc.select("a.ui-icon-right").last().attr("href");
-        Pattern pat = Pattern.compile("/index\\.php/tag/[0-9]*/[a-zA-Z0-9_\\-:+]*\\?page=(\\d+)");
-        Matcher mat = pat.matcher(nextPageUrl);
-        if (mat.matches()) {
-            logger.debug("Getting pages from a tag");
-            String base_link = mat.group(0).replaceAll("\\?page=\\d+", "");
-            logger.debug("base_link is " + base_link);
-            int numOfPages = Integer.parseInt(mat.group(1));
-            for (int x = 1; x != numOfPages +1; x++) {
-                logger.debug("running loop");
-                String link = base_link + "?page=" + Integer.toString(x);
-                pages.add(link);
-            }
-        } else {
-            Pattern pa = Pattern.compile("/index\\.php/search\\?q=[a-zA-Z0-9_\\-:]*&page=(\\d+)");
-            Matcher ma = pa.matcher(nextPageUrl);
-            if (ma.matches()) {
-                logger.debug("Getting pages from a search");
-                String base_link = ma.group(0).replaceAll("page=\\d+", "");
-                logger.debug("base_link is " + base_link);
-                int numOfPages = Integer.parseInt(ma.group(1));
-                for (int x = 1; x != numOfPages +1; x++) {
-                    logger.debug("running loop");
-                    String link = base_link + "page=" + Integer.toString(x);
-                    logger.debug(link);
-                    pages.add(link);
-                }
-            }
-        }
-        return pages;
-    }
-
     @Override
     public List<String> getURLsFromPage(Document doc) {
         List<String> result = new ArrayList<>();
-        // Checks if this is a comic page or a page of albums
-        // If true the page is a page of albums
-        if (doc.toString().contains("class=\"g-item g-album\"")) {
-            // This if checks that there is more than 1 page
-            if (doc.select("a.ui-icon-right").last().attr("href") != "") {
-                // There is more than one page so we call getListOfPages
-                List<String> pagesToRip = getListOfPages(doc);
-                logger.debug("Pages to rip = " + pagesToRip);
-                for (String url : pagesToRip) {
-                    logger.debug("Getting albums from " + url);
-                    result = getAlbumsFromPage(url);
-                }
-            } else {
-                logger.debug("There is only one page on this page of albums");
-                // There is only 1 page so we call getAlbumsFromPage and pass it the page url
-                result = getAlbumsFromPage(doc.select("div.g-description > a").attr("href"));
-            }
-            return result;
-        }
-        else {
         for (Element el : doc.select("img")) {
             String imageSource = el.attr("src");
             // This bool is here so we don't try and download the site logo
@@ -245,7 +123,6 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
                 result.add("http://myhentaicomics.com/" + imageSource);
             }
         }
-        }
         return result;
     }
@@ -2,6 +2,7 @@ package com.rarchives.ripme.ripper.rippers;

 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage;
 import com.rarchives.ripme.utils.Http;
 import com.rarchives.ripme.utils.Utils;
 import org.jsoup.nodes.Document;
@@ -64,6 +65,39 @@ public class NhentaiRipper extends AbstractHTMLRipper {
         return "nhentai" + title;
     }

+    private List<String> getTags(Document doc) {
+        List<String> tags = new ArrayList<>();
+        for (Element tag : doc.select("a.tag")) {
+            tags.add(tag.attr("href").replaceAll("/tag/", "").replaceAll("/", ""));
+        }
+        return tags;
+    }
+
+    /**
+     * Checks for blacklisted tags on the page. If it finds one it returns it; if not it returns null
+     *
+     * @param doc
+     * @return String
+     */
+    public String checkTags(Document doc, String[] blackListedTags) {
+        // If the user hasn't blacklisted any tags we return null
+        if (blackListedTags == null) {
+            return null;
+        }
+        logger.info("Blacklisted tags " + blackListedTags[0]);
+        List<String> tagsOnPage = getTags(doc);
+        for (String tag : blackListedTags) {
+            for (String pageTag : tagsOnPage) {
+                // We replace all dashes in the tag with spaces because the tags we get from the site
+                // are separated using dashes
+                if (tag.trim().toLowerCase().equals(pageTag.replaceAll("-", " ").toLowerCase())) {
+                    return tag;
+                }
+            }
+        }
+        return null;
+    }
+
     @Override
     public String getGID(URL url) throws MalformedURLException {
         // Ex: https://nhentai.net/g/159174/
@@ -82,6 +116,13 @@ public class NhentaiRipper extends AbstractHTMLRipper {
         if (firstPage == null) {
             firstPage = Http.url(url).get();
         }
+
+        String blacklistedTag = checkTags(firstPage, Utils.getConfigStringArray("nhentai.blacklist.tags"));
+        if (blacklistedTag != null) {
+            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " +
+                    "contains the blacklisted tag \"" + blacklistedTag + "\"");
+            return null;
+        }
         return firstPage;
     }
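The blacklist added above is driven by a `nhentai.blacklist.tags` entry in the RipMe config. A minimal standalone sketch of the matching rule (the class name and sample tags below are illustrative, not RipMe code):

    import java.util.Arrays;
    import java.util.List;

    public class BlacklistSketch {
        // Site tags arrive dash-separated ("story-arc") while config tags use
        // spaces ("story arc"), so checkTags() normalizes dashes to spaces first.
        static String firstBlacklistedTag(String[] blacklist, List<String> tagsOnPage) {
            if (blacklist == null) {
                return null;
            }
            for (String tag : blacklist) {
                for (String pageTag : tagsOnPage) {
                    if (tag.trim().toLowerCase().equals(pageTag.replaceAll("-", " ").toLowerCase())) {
                        return tag;
                    }
                }
            }
            return null;
        }

        public static void main(String[] args) {
            List<String> pageTags = Arrays.asList("full-color", "story-arc");
            // Prints "story arc": the dash-to-space normalization makes it match
            System.out.println(firstBlacklistedTag(new String[] {"story arc"}, pageTags));
        }
    }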
@@ -33,23 +33,6 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
         return "nude-gals.com";
     }
-
-    public String getAlbumTitle(URL url) throws MalformedURLException {
-        try {
-            Document doc = getFirstPage();
-            Elements elems = doc.select("#left_col > #grid_title > .right");
-
-            String girl = elems.get(3).text();
-            String magazine = elems.get(2).text();
-            String title = elems.get(0).text();
-
-            return getHost() + "_" + girl + "-" + magazine + "-" + title;
-        } catch (Exception e) {
-            // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
-        }
-        return super.getAlbumTitle(url);
-    }

     @Override
     public String getGID(URL url) throws MalformedURLException {
         Pattern p;
@@ -79,9 +62,9 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
     public List<String> getURLsFromPage(Document doc) {
         List<String> imageURLs = new ArrayList<>();

-        Elements thumbs = doc.select("#grid_container .grid > .grid_box");
+        Elements thumbs = doc.select("img.thumbnail");
         for (Element thumb : thumbs) {
-            String link = thumb.select("a").get(1).attr("href");
+            String link = thumb.attr("src").replaceAll("thumbs/th_", "");
             String imgSrc = "http://nude-gals.com/" + link;
             imageURLs.add(imgSrc);
         }
@@ -4,10 +4,13 @@ import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

+import com.rarchives.ripme.ripper.AbstractRipper;
+import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
 import org.json.JSONArray;
 import org.json.JSONObject;
 import org.json.JSONTokener;
@@ -17,6 +20,9 @@ import com.rarchives.ripme.ui.UpdateUtils;
 import com.rarchives.ripme.utils.Http;
 import com.rarchives.ripme.utils.RipUtils;
 import com.rarchives.ripme.utils.Utils;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;

 public class RedditRipper extends AlbumRipper {

@@ -0,0 +1,94 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class Rule34Ripper extends AbstractHTMLRipper {
+
+    public Rule34Ripper(URL url) throws IOException {
+        super(url);
+    }
+
+    private String apiUrl;
+    private int pageNumber = 0;
+
+    @Override
+    public String getHost() {
+        return "rule34";
+    }
+
+    @Override
+    public String getDomain() {
+        return "rule34.xxx";
+    }
+
+    @Override
+    public boolean canRip(URL url){
+        Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected rule34.xxx URL format: " +
+                "rule34.xxx/index.php?page=post&s=list&tags=TAG - got " + url + " instead");
+    }
+
+    public URL getAPIUrl() throws MalformedURLException {
+        URL urlToReturn = new URL("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url));
+        return urlToReturn;
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        apiUrl = getAPIUrl().toExternalForm();
+        // "url" is an instance field of the superclass
+        return Http.url(getAPIUrl()).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        if (doc.html().contains("Search error: API limited due to abuse")) {
+            throw new IOException("No more pages");
+        }
+        pageNumber += 1;
+        String nextPage = apiUrl + "&pid=" + pageNumber;
+        return Http.url(nextPage).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("posts > post")) {
+            String imageSource = el.select("post").attr("file_url");
+            result.add(imageSource);
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
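The new ripper drives a Gelbooru-style XML endpoint rather than scraping HTML: the first request omits `pid`, and each later page appends `&pid=N`. A small sketch of the URL sequence (the tag value is a placeholder):

    public class Rule34PagingSketch {
        public static void main(String[] args) {
            String apiUrl = "https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=TAG";
            System.out.println(apiUrl);                     // first page, no pid
            for (int pid = 1; pid <= 2; pid++) {
                System.out.println(apiUrl + "&pid=" + pid); // later pages
            }
        }
    }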
@@ -43,7 +43,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
             try {
-                return URLDecoder.decode(m.group(2), "UTF-8");
+                return URLDecoder.decode(m.group(1) + "_" + m.group(2), "UTF-8");
             } catch (UnsupportedEncodingException e) {
                 throw new MalformedURLException("Cannot decode tag name '" + m.group(1) + "'");
             }
@@ -53,6 +53,20 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
                 url + "instead");
     }

+    public String getSubDomain(URL url){
+        Pattern p = Pattern.compile("^https?://([a-zA-Z0-9]+\\.)?sankakucomplex\\.com/.*tags=([^&]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            try {
+                return URLDecoder.decode(m.group(1), "UTF-8");
+            } catch (UnsupportedEncodingException e) {
+                return null;
+            }
+        }
+        return null;
+    }
+
     @Override
     public Document getFirstPage() throws IOException {
         if (albumDoc == null) {
@@ -71,9 +85,11 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
         for (Element thumbSpan : doc.select("div.content > div > span.thumb > a")) {
             String postLink = thumbSpan.attr("href");
             try {
+                String subDomain = getSubDomain(url);
+                String siteURL = "https://" + subDomain + "sankakucomplex.com";
                 // Get the page the full sized image is on
-                Document subPage = Http.url("https://chan.sankakucomplex.com" + postLink).get();
-                logger.info("Checking page " + "https://chan.sankakucomplex.com" + postLink);
+                Document subPage = Http.url(siteURL + postLink).get();
+                logger.info("Checking page " + siteURL + postLink);
                 imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href"));
             } catch (IOException e) {
                 logger.warn("Error while loading page " + postLink, e);
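Note that group(1) of the subdomain pattern keeps its trailing dot, which is why `"https://" + subDomain + "sankakucomplex.com"` concatenates cleanly. A quick check (the sample URL is illustrative):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class SubdomainSketch {
        public static void main(String[] args) {
            Pattern p = Pattern.compile("^https?://([a-zA-Z0-9]+\\.)?sankakucomplex\\.com/.*tags=([^&]+).*$");
            Matcher m = p.matcher("https://idol.sankakucomplex.com/?tags=some_tag");
            if (m.matches()) {
                // group(1) is "idol." including the dot
                System.out.println("https://" + m.group(1) + "sankakucomplex.com");
            }
        }
    }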
@@ -57,7 +57,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
         String nextPage = elem.parent().attr("href");
         // Sometimes this returns an empty string
         // This check stops that
-        if (nextPage == "") {
+        if (nextPage.equals("")) {
             return null;
         }
         else {
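Several hunks in this commit make the same correction: comparing a `String` with `==` tests object identity, not contents, so an empty `href` pulled out of a parsed document may never compare equal to the literal `""`. A worked example:

    public class StringEqualsSketch {
        public static void main(String[] args) {
            String fromParser = new String("");        // stands in for a parsed attribute value
            System.out.println(fromParser == "");      // false: different objects
            System.out.println(fromParser.equals("")); // true: same contents
            System.out.println(fromParser.isEmpty());  // equivalent and clearer
        }
    }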
@@ -34,7 +34,7 @@ public class SinnercomicsRipper extends AbstractHTMLRipper {
         Pattern p = Pattern.compile("^https?://sinnercomics.com/comic/([a-zA-Z0-9-]*)/?$");
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
-            return m.group(1);
+            return m.group(1).replaceAll("-page-\\d+", "");
         }
         throw new MalformedURLException("Expected sinnercomics.com URL format: " +
                 "sinnercomics.com/comic/albumName - got " + url + " instead");
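Stripping the `-page-NN` suffix makes every page of an album normalize to one GID, so all pages land in the same download folder instead of one folder per page:

    public class GidSketch {
        public static void main(String[] args) {
            // Hypothetical slugs; both inputs now yield the same album GID
            System.out.println("some-album-page-01".replaceAll("-page-\\d+", "")); // some-album
            System.out.println("some-album-page-02".replaceAll("-page-\\d+", "")); // some-album
        }
    }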
@@ -3,24 +3,19 @@ package com.rarchives.ripme.ripper.rippers;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;

-import com.rarchives.ripme.ripper.AlbumRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;

-/**
- * Appears to be broken as of 2015-02-11.
- * Generating large image from thumbnail requires replacing "/m/" with something else:
- *  -> Sometimes "/b/"
- *  -> Sometimes "/p/"
- * No way to know without loading the image page.
- */
-public class SmuttyRipper extends AlbumRipper {
+public class SmuttyRipper extends AbstractHTMLRipper {

     private static final String DOMAIN = "smutty.com",
                                 HOST = "smutty";
@@ -29,6 +24,16 @@ public class SmuttyRipper extends AlbumRipper {
         super(url);
     }

+    @Override
+    public String getHost() {
+        return "smutty";
+    }
+
+    @Override
+    public String getDomain() {
+        return "smutty.com";
+    }
+
     @Override
     public boolean canRip(URL url) {
         return (url.getHost().endsWith(DOMAIN));
@@ -40,69 +45,57 @@ public class SmuttyRipper extends AlbumRipper {
     }

     @Override
-    public void rip() throws IOException {
-        int page = 0;
-        String url, tag = getGID(this.url);
-        boolean hasNextPage = true;
-        while (hasNextPage) {
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> results = new ArrayList<>();
+        for (Element image : doc.select("a.l > img")) {
             if (isStopped()) {
                 break;
             }
-            page++;
-            url = "http://smutty.com/h/" + tag + "/?q=%23" + tag + "&page=" + page + "&sort=date&lazy=1";
-            this.sendUpdate(STATUS.LOADING_RESOURCE, url);
-            logger.info(" Retrieving " + url);
-            Document doc;
-            try {
-                doc = Http.url(url)
-                        .ignoreContentType()
-                        .get();
-            } catch (IOException e) {
-                if (e.toString().contains("Status=404")) {
-                    logger.info("No more pages to load");
-                } else {
-                    logger.warn("Exception while loading " + url, e);
-                }
-                break;
-            }
-            for (Element image : doc.select("a.l > img")) {
-                if (isStopped()) {
-                    break;
-                }
-                String imageUrl = image.attr("src");
+            String imageUrl = image.attr("src");

             // Construct direct link to image based on thumbnail
             StringBuilder sb = new StringBuilder();
             String[] fields = imageUrl.split("/");
             for (int i = 0; i < fields.length; i++) {
                 if (i == fields.length - 2 && fields[i].equals("m")) {
                     fields[i] = "b";
                 }
                 sb.append(fields[i]);
                 if (i < fields.length - 1) {
                     sb.append("/");
-                }
                 }
             }
-                imageUrl = sb.toString();
-                addURLToDownload(new URL("http:" + imageUrl));
-            }
-            if (doc.select("#next").size() == 0) {
-                break; // No more pages
-            }
-            // Wait before loading next page
-            try {
-                Thread.sleep(1000);
-            } catch (InterruptedException e) {
-                logger.error("[!] Interrupted while waiting to load next album:", e);
-                break;
-            }
+            imageUrl = sb.toString();
+            results.add("http:" + imageUrl);
         }
-        waitForThreads();
+        return results;
     }

     @Override
-    public String getHost() {
-        return HOST;
+    public Document getNextPage(Document doc) throws IOException {
+        Element elem = doc.select("a.next").first();
+        if (elem == null) {
+            throw new IOException("No more pages");
+        }
+        String nextPage = elem.attr("href");
+        // Sometimes this returns an empty string
+        // This check stops that
+        if (nextPage.equals("")) {
+            throw new IOException("No more pages");
+        }
+        else {
+            return Http.url("https://smutty.com" + nextPage).get();
+        }
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
     }

     @Override
@@ -117,6 +110,12 @@ public class SmuttyRipper extends AlbumRipper {
         if (m.matches()) {
             return m.group(1).replace("%23", "");
         }
+
+        p = Pattern.compile("^https?://smutty.com/user/([a-zA-Z0-9\\-_]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
         throw new MalformedURLException("Expected tag in URL (smutty.com/h/tag and not " + url);
     }

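The old class javadoc (deleted above) explained the thumbnail trick the ripper still relies on: swapping the second-to-last path segment `m` (medium thumbnail) for `b` (big). Extracted as a standalone helper for clarity (the sample URL is illustrative):

    public class ThumbRewriteSketch {
        // Mirrors the StringBuilder loop in getURLsFromPage()
        static String toFullSize(String thumbUrl) {
            String[] fields = thumbUrl.split("/");
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < fields.length; i++) {
                if (i == fields.length - 2 && fields[i].equals("m")) {
                    fields[i] = "b";
                }
                sb.append(fields[i]);
                if (i < fields.length - 1) {
                    sb.append("/");
                }
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            // Prints //smutty.com/media/b/abc123.jpg
            System.out.println(toFullSize("//smutty.com/media/m/abc123.jpg"));
        }
    }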
@@ -17,7 +17,6 @@ import org.json.JSONObject;
 import org.jsoup.Connection;
 import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;

 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -35,13 +34,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
         try {
             // This sessionId will expire and need to be replaced
             cookies.put("ASP.NET_SessionId","c4rbzccf0dvy3e0cloolmlkq");
-            logger.info(cookies);
             Document doc = Jsoup.connect(postURL).data("q", getAlbumID()).userAgent(USER_AGENT).cookies(cookies).referrer("http://www.tsumino.com/Read/View/" + getAlbumID()).post();
             String jsonInfo = doc.html().replaceAll("<html>","").replaceAll("<head></head>", "").replaceAll("<body>", "").replaceAll("</body>", "")
                     .replaceAll("</html>", "").replaceAll("\n", "");
-            logger.info(jsonInfo);
             JSONObject json = new JSONObject(jsonInfo);
-            logger.info(json.getJSONArray("reader_page_urls"));
             return json.getJSONArray("reader_page_urls");
         } catch (IOException e) {
             logger.info(e);
@@ -63,11 +59,16 @@ public class TsuminoRipper extends AbstractHTMLRipper {

     @Override
     public String getGID(URL url) throws MalformedURLException {
-        Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/([a-zA-Z0-9_-]*)");
+        Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/([a-zA-Z0-9_-]*)/?");
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
             return m.group(1) + "_" + m.group(2);
         }
+        p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/?");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
         throw new MalformedURLException("Expected tsumino URL format: " +
                 "tsumino.com/Book/Info/ID/TITLE - got " + url + " instead");
     }
@@ -85,7 +86,6 @@ public class TsuminoRipper extends AbstractHTMLRipper {
     public Document getFirstPage() throws IOException {
         Connection.Response resp = Http.url(url).response();
         cookies.putAll(resp.cookies());
-        logger.info(resp.parse());
         return resp.parse();
     }

@@ -103,6 +103,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
     @Override
     public void downloadURL(URL url, int index) {
         sleep(1000);
-        addURLToDownload(url, getPrefix(index));
+        /*
+        There is no way to tell if an image returned from tsumino.com is a png or a jpg. The content-type header is always
+        "image/jpeg" even when the image is a png. The file ext is not included in the url.
+        */
+        addURLToDownload(url, getPrefix(index), "", null, null, null, null, true);
     }
 }
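The trailing `true` in the new `addURLToDownload` call appears, from the comment above it, to tell the downloader to pick the file extension from the response's Content-Type header instead of the URL; that parameter's meaning is inferred here, not shown in the hunk. A sketch of that kind of MIME-to-extension mapping:

    public class MimeExtSketch {
        // Illustrative helper, not RipMe's actual implementation
        static String extFromMime(String contentType) {
            switch (contentType) {
                case "image/png":  return "png";
                case "image/gif":  return "gif";
                case "image/jpeg":
                default:           return "jpg"; // tsumino always claims jpeg
            }
        }

        public static void main(String[] args) {
            System.out.println(extFromMime("image/png")); // png
        }
    }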
@@ -44,7 +44,7 @@ public class TumblrRipper extends AlbumRipper {
     private static final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS

     /**
      * Gets the API key.
      * Chooses between default/included keys & user specified ones (from the config file).
      * @return Tumblr API key
      */
@@ -57,7 +57,7 @@ public class TumblrRipper extends AlbumRipper {
             logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
             return userDefinedAPIKey;
         }

     }

     public TumblrRipper(URL url) throws IOException {
@@ -71,12 +71,12 @@ public class TumblrRipper extends AlbumRipper {
     public boolean canRip(URL url) {
         return url.getHost().endsWith(DOMAIN);
     }

     /**
      * Sanitizes URL.
      * @param url URL to be sanitized.
      * @return Sanitized URL
      * @throws MalformedURLException
      */
     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
@@ -230,7 +230,7 @@ public class TumblrRipper extends AlbumRipper {
                     urlString = urlString.replaceAll("_\\d+\\.", "_raw.");
                     fileURL = new URL(urlString);
                 } else {
-                    fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http", "https"));
+                    fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http:", "https:"));
                 }
                 m = p.matcher(fileURL.toString());
                 if (m.matches()) {
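This narrows the scheme rewrite behind what the 1.7.37 changelog called the `httpss` bug: replacing the bare substring `http` inside a URL that is already `https` doubles the `s`, while matching on `http:` cannot re-match an existing `https:`:

    public class HttpsReplaceSketch {
        public static void main(String[] args) {
            String url = "https://example.com/a_500.jpg";
            System.out.println(url.replaceAll("http", "https"));   // httpss://... (the old bug)
            System.out.println(url.replaceAll("http:", "https:")); // unchanged, already https
        }
    }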
@@ -4,14 +4,12 @@ import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
-import org.jsoup.select.Elements;

 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
@@ -1,95 +0,0 @@
-package com.rarchives.ripme.ripper.rippers;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.json.JSONArray;
-import org.json.JSONObject;
-import org.jsoup.HttpStatusException;
-
-import com.rarchives.ripme.ripper.AlbumRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
-import com.rarchives.ripme.utils.Http;
-
-public class VineRipper extends AlbumRipper {
-
-    private static final String DOMAIN = "vine.co",
-                                HOST = "vine";
-
-    public VineRipper(URL url) throws IOException {
-        super(url);
-    }
-
-    @Override
-    public boolean canRip(URL url) {
-        return url.getHost().endsWith(DOMAIN);
-    }
-
-    @Override
-    public URL sanitizeURL(URL url) throws MalformedURLException {
-        return new URL("http://vine.co/u/" + getGID(url));
-    }
-
-    @Override
-    public void rip() throws IOException {
-        int page = 0;
-        String baseURL = "https://vine.co/api/timelines/users/" + getGID(this.url);
-        JSONObject json = null;
-        while (true) {
-            page++;
-            String theURL = baseURL;
-            if (page > 1) {
-                theURL += "?page=" + page;
-            }
-            try {
-                logger.info(" Retrieving " + theURL);
-                sendUpdate(STATUS.LOADING_RESOURCE, theURL);
-                json = Http.url(theURL).getJSON();
-            } catch (HttpStatusException e) {
-                logger.debug("Hit end of pages at page " + page, e);
-                break;
-            }
-            JSONArray records = json.getJSONObject("data").getJSONArray("records");
-            for (int i = 0; i < records.length(); i++) {
-                String videoURL = records.getJSONObject(i).getString("videoUrl");
-                addURLToDownload(new URL(videoURL));
-                if (isThisATest()) {
-                    break;
-                }
-            }
-            if (isThisATest()) {
-                break;
-            }
-            if (records.length() == 0) {
-                logger.info("Zero records returned");
-                break;
-            }
-            try {
-                Thread.sleep(2000);
-            } catch (InterruptedException e) {
-                logger.error("[!] Interrupted while waiting to load next page", e);
-                break;
-            }
-        }
-        waitForThreads();
-    }
-
-    @Override
-    public String getHost() {
-        return HOST;
-    }
-
-    @Override
-    public String getGID(URL url) throws MalformedURLException {
-        Pattern p = Pattern.compile("^https?://(www\\.)?vine\\.co/u/([0-9]+).*$");
-        Matcher m = p.matcher(url.toExternalForm());
-        if (!m.matches()) {
-            throw new MalformedURLException("Expected format: http://vine.co/u/######");
-        }
-        return m.group(m.groupCount());
-    }
-
-}
@@ -36,7 +36,7 @@ public class WebtoonsRipper extends AbstractHTMLRipper {

     @Override
     public boolean canRip(URL url) {
-        Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z]+/[a-zA-Z]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
+        Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
         Matcher mat = pat.matcher(url.toExternalForm());
         if (mat.matches()) {
             return true;
@@ -47,7 +47,7 @@ public class WebtoonsRipper extends AbstractHTMLRipper {

     @Override
     public String getAlbumTitle(URL url) throws MalformedURLException {
-        Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z]+/[a-zA-Z]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
+        Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
         Matcher mat = pat.matcher(url.toExternalForm());
         if (mat.matches()) {
             return getHost() + "_" + mat.group(1);
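The widened character classes let the language and section path segments contain hyphens and underscores. With a hyphenated language code (hypothetical URL), only the new pattern matches:

    import java.util.regex.Pattern;

    public class WebtoonsRegexSketch {
        public static void main(String[] args) {
            String oldPat = "https?://www.webtoons.com/[a-zA-Z]+/[a-zA-Z]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*";
            String newPat = "https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*";
            String url = "https://www.webtoons.com/zh-hant/drama/some-title/ep-1/viewer";
            System.out.println(Pattern.matches(oldPat, url)); // false: "zh-hant" has a hyphen
            System.out.println(Pattern.matches(newPat, url)); // true
        }
    }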
@@ -44,7 +44,20 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             "freeadultcomix.com",
             "thisis.delvecomic.com",
             "tnbtu.com",
-            "shipinbottle.pepsaga.com"
+            "shipinbottle.pepsaga.com",
+            "8muses.download",
+            "spyingwithlana.com"
+    );
+
+    private static List<String> theme1 = Arrays.asList(
+            "www.totempole666.com",
+            "buttsmithy.com",
+            "themonsterunderthebed.net",
+            "prismblush.com",
+            "www.konradokonski.com",
+            "thisis.delvecomic.com",
+            "tnbtu.com",
+            "spyingwithlana.com"
     );

     @Override
@@ -135,12 +148,79 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
         if (shipinbottleMat.matches()) {
             return true;
         }
+
+        Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
+        Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
+        if (eight_musesMat.matches()) {
+            return true;
+        }
+
+        Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
+        Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
+        if (spyingwithlanaMat.matches()) {
+            return true;
+        }
+
+        Pattern pa = Pattern.compile("^https?://8muses.download/\\?s=([a-zA-Z0-9-]*)");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        if (ma.matches()) {
+            return true;
+        }
+
+        Pattern pat = Pattern.compile("https?://8muses.download/page/\\d+/\\?s=([a-zA-Z0-9-]*)");
+        Matcher mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return true;
+        }
+
+        pat = Pattern.compile("https://8muses.download/category/([a-zA-Z0-9-]*)/?");
+        mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return true;
+        }

         return false;
     }

+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern pa = Pattern.compile("^https?://8muses.download/\\?s=([a-zA-Z0-9-]*)");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        if (ma.matches()) {
+            return true;
+        }
+
+        Pattern pat = Pattern.compile("https?://8muses.download/page/\\d+/\\?s=([a-zA-Z0-9-]*)");
+        Matcher mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return true;
+        }
+
+        pat = Pattern.compile("https://8muses.download/category/([a-zA-Z0-9-]*)/?");
+        mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return true;
+        }
+
+        return false;
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select("#post_masonry > article > div > figure > a")) {
+            urlsToAddToQueue.add(elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
     @Override
     public String getAlbumTitle(URL url) throws MalformedURLException {
         Pattern totempole666Pat = Pattern.compile("(?:https?://)?(?:www\\.)?totempole666.com/comic/([a-zA-Z0-9_-]*)/?$");
@@ -209,6 +289,18 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             return getHost() + "_" + "Ship_in_bottle";
         }
+
+        Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
+        Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
+        if (eight_musesMat.matches()) {
+            return getHost() + "_" + eight_musesMat.group(1);
+        }
+
+        Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
+        Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
+        if (spyingwithlanaMat.matches()) {
+            return "spyingwithlana_" + spyingwithlanaMat.group(1).replaceAll("-page-\\d", "");
+        }

         return super.getAlbumTitle(url);
     }

@@ -227,13 +319,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
         // Find next page
         String nextPage = "";
         Element elem = null;
-        if (getHost().contains("www.totempole666.com")
-                || getHost().contains("buttsmithy.com")
-                || getHost().contains("themonsterunderthebed.net")
-                || getHost().contains("prismblush.com")
-                || getHost().contains("www.konradokonski.com")
-                || getHost().contains("thisis.delvecomic.com")
-                || getHost().contains("tnbtu.com")) {
+        if (theme1.contains(getHost())) {
             elem = doc.select("a.comic-nav-next").first();
             if (elem == null) {
                 throw new IOException("No more pages");
@@ -247,7 +333,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             nextPage = elem.attr("href");
         }

-        if (nextPage == "") {
+        if (nextPage.equals("")) {
             throw new IOException("No more pages");
         } else {
             return Http.url(nextPage).get();
@@ -257,13 +343,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
     @Override
     public List<String> getURLsFromPage(Document doc) {
         List<String> result = new ArrayList<>();
-        if (getHost().contains("www.totempole666.com")
-                || getHost().contains("buttsmithy.com")
-                || getHost().contains("themonsterunderthebed.net")
-                || getHost().contains("prismblush.com")
-                || getHost().contains("www.konradokonski.com")
-                || getHost().contains("thisis.delvecomic.com")
-                || getHost().contains("tnbtu.com")) {
+        if (theme1.contains(getHost())) {
             Element elem = doc.select("div.comic-table > div#comic > a > img").first();
             // If doc is the last page in the comic then elem.attr("src") returns null
             // because there is no link <a> to the next page
@@ -315,6 +395,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             }
         }
+
+        if (url.toExternalForm().contains("8muses.download")) {
+            for (Element elem : doc.select("div.popup-gallery > figure > a")) {
+                result.add(elem.attr("href"));
+            }
+        }

         return result;
     }

@@ -327,8 +413,14 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
                 || getHost().contains("themonsterunderthebed.net")) {
             addURLToDownload(url, pageTitle + "_");
         }
-        // If we're ripping a site where we can't get the page number/title we just rip normally
-        addURLToDownload(url, getPrefix(index));
+        if (getHost().contains("tnbtu.com")) {
+            // We need to set the referrer header for tnbtu
+            addURLToDownload(url, getPrefix(index), "", "http://www.tnbtu.com/comic", null);
+        } else {
+            // If we're ripping a site where we can't get the page number/title we just rip normally
+            addURLToDownload(url, getPrefix(index));
+        }

     }

     @Override
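Taken together, `hasQueueSupport`, `pageContainsAlbums`, and `getAlbumsToQueue` let a search or category page be expanded into individual album URLs for the queue instead of being ripped directly. A compilable sketch of the driver logic these overrides imply (the interface and driver are assumptions; only the three method names come from the hunks above):

    import java.util.Arrays;
    import java.util.List;

    public class QueueFlowSketch {
        interface QueueCapableRipper {
            boolean hasQueueSupport();
            boolean pageContainsAlbums(String url);
            List<String> getAlbumsToQueue();
            void rip();
        }

        static void handle(QueueCapableRipper ripper, String url) {
            if (ripper.hasQueueSupport() && ripper.pageContainsAlbums(url)) {
                // A page of albums: queue each album instead of ripping the page
                for (String albumUrl : ripper.getAlbumsToQueue()) {
                    System.out.println("queue: " + albumUrl);
                }
            } else {
                ripper.rip(); // a single album: rip it directly
            }
        }

        public static void main(String[] args) {
            handle(new QueueCapableRipper() {
                public boolean hasQueueSupport() { return true; }
                public boolean pageContainsAlbums(String url) { return url.contains("?s="); }
                public List<String> getAlbumsToQueue() { return Arrays.asList("https://8muses.download/album-1/"); }
                public void rip() { }
            }, "https://8muses.download/?s=search-term");
        }
    }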
@@ -72,7 +72,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {

     @Override
     public Document getNextPage(Document doc) throws IOException {
-        if (doc.select("a.next").first().attr("href") != "") {
+        if (!doc.select("a.next").first().attr("href").equals("")) {
             return Http.url(doc.select("a.next").first().attr("href")).get();
         } else {
             throw new IOException("No more pages");
@ -3,7 +3,6 @@ package com.rarchives.ripme.ripper.rippers.video;
|
|||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.net.MalformedURLException;
|
import java.net.MalformedURLException;
|
||||||
import java.net.URL;
|
import java.net.URL;
|
||||||
import java.net.URLDecoder;
|
|
||||||
import java.util.regex.Matcher;
|
import java.util.regex.Matcher;
|
||||||
import java.util.regex.Pattern;
|
import java.util.regex.Pattern;
|
||||||
|
|
||||||
|
@ -16,9 +16,7 @@ import java.net.URL;
|
|||||||
import java.nio.file.Files;
|
import java.nio.file.Files;
|
||||||
import java.nio.file.Path;
|
import java.nio.file.Path;
|
||||||
import java.nio.file.Paths;
|
import java.nio.file.Paths;
|
||||||
import java.util.Collections;
|
import java.util.*;
|
||||||
import java.util.Date;
|
|
||||||
import java.util.Enumeration;
|
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
import javax.imageio.ImageIO;
|
import javax.imageio.ImageIO;
|
||||||
@ -138,6 +136,17 @@ public final class MainWindow implements Runnable, RipStatusHandler {
|
|||||||
|
|
||||||
private static AbstractRipper ripper;
|
private static AbstractRipper ripper;
|
||||||
|
|
||||||
|
private ResourceBundle rb = Utils.getResourceBundle();
|
||||||
|
|
||||||
|
private void updateQueueLabel() {
|
||||||
|
if (queueListModel.size() > 0) {
|
||||||
|
optionQueue.setText( rb.getString("Queue") + " (" + queueListModel.size() + ")");
|
||||||
|
} else {
|
||||||
|
optionQueue.setText(rb.getString("Queue"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
private static void addCheckboxListener(JCheckBox checkBox, String configString) {
|
private static void addCheckboxListener(JCheckBox checkBox, String configString) {
|
||||||
checkBox.addActionListener(arg0 -> {
|
checkBox.addActionListener(arg0 -> {
|
||||||
Utils.setConfigBoolean(configString, checkBox.isSelected());
|
Utils.setConfigBoolean(configString, checkBox.isSelected());
|
||||||
@ -153,6 +162,11 @@ public final class MainWindow implements Runnable, RipStatusHandler {
|
|||||||
return checkbox;
|
return checkbox;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
public static void addUrlToQueue(String url) {
|
||||||
|
queueListModel.addElement(url);
|
||||||
|
}
|
||||||
|
|
||||||
public MainWindow() {
|
public MainWindow() {
|
||||||
mainFrame = new JFrame("RipMe v" + UpdateUtils.getThisJarVersion());
|
mainFrame = new JFrame("RipMe v" + UpdateUtils.getThisJarVersion());
|
||||||
mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
|
mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
|
||||||
@ -289,7 +303,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
|
|||||||
gbc.gridx = 3; ripPanel.add(stopButton, gbc);
|
gbc.gridx = 3; ripPanel.add(stopButton, gbc);
|
||||||
gbc.weightx = 1;
|
gbc.weightx = 1;
|
||||||
|
|
||||||
statusLabel = new JLabel("Inactive");
|
statusLabel = new JLabel(rb.getString("inactive"));
|
||||||
statusLabel.setHorizontalAlignment(JLabel.CENTER);
|
statusLabel.setHorizontalAlignment(JLabel.CENTER);
|
||||||
openButton = new JButton();
|
openButton = new JButton();
|
||||||
openButton.setVisible(false);
|
openButton.setVisible(false);
|
||||||
@ -307,10 +321,10 @@ public final class MainWindow implements Runnable, RipStatusHandler {
|
|||||||
|
|
||||||
JPanel optionsPanel = new JPanel(new GridBagLayout());
|
JPanel optionsPanel = new JPanel(new GridBagLayout());
|
||||||
optionsPanel.setBorder(emptyBorder);
|
optionsPanel.setBorder(emptyBorder);
|
||||||
optionLog = new JButton("Log");
|
optionLog = new JButton(rb.getString("Log"));
|
||||||
optionHistory = new JButton("History");
|
optionHistory = new JButton(rb.getString("History"));
|
||||||
optionQueue = new JButton("Queue");
|
optionQueue = new JButton(rb.getString("Queue"));
|
||||||
optionConfiguration = new JButton("Configuration");
|
optionConfiguration = new JButton(rb.getString("Configuration"));
|
||||||
optionLog.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
|
optionLog.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
|
||||||
optionHistory.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
|
optionHistory.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
|
||||||
optionQueue.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
|
optionQueue.setFont(optionLog.getFont().deriveFont(Font.PLAIN));
|
||||||
@ -402,9 +416,9 @@ public final class MainWindow implements Runnable, RipStatusHandler {
|
|||||||
historyTable.getColumnModel().getColumn(i).setPreferredWidth(width);
|
historyTable.getColumnModel().getColumn(i).setPreferredWidth(width);
|
||||||
}
|
}
|
||||||
JScrollPane historyTableScrollPane = new JScrollPane(historyTable);
|
JScrollPane historyTableScrollPane = new JScrollPane(historyTable);
|
||||||
historyButtonRemove = new JButton("Remove");
|
historyButtonRemove = new JButton(rb.getString("remove"));
|
||||||
historyButtonClear = new JButton("Clear");
|
historyButtonClear = new JButton(rb.getString("clear"));
|
||||||
historyButtonRerip = new JButton("Re-rip Checked");
|
historyButtonRerip = new JButton(rb.getString("re-rip.checked"));
|
||||||
gbc.gridx = 0;
|
gbc.gridx = 0;
|
||||||
// History List Panel
|
// History List Panel
|
||||||
JPanel historyTablePanel = new JPanel(new GridBagLayout());
|
JPanel historyTablePanel = new JPanel(new GridBagLayout());
|
||||||
@ -440,11 +454,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
|
|||||||
for (String item : Utils.getConfigList("queue")) {
|
for (String item : Utils.getConfigList("queue")) {
|
||||||
queueListModel.addElement(item);
|
queueListModel.addElement(item);
|
||||||
}
|
}
|
||||||
if (queueListModel.size() > 0) {
|
updateQueueLabel();
|
||||||
optionQueue.setText("Queue (" + queueListModel.size() + ")");
|
|
||||||
} else {
|
|
||||||
optionQueue.setText("Queue");
|
|
||||||
}
|
|
||||||
gbc.gridx = 0;
|
gbc.gridx = 0;
|
||||||
JPanel queueListPanel = new JPanel(new GridBagLayout());
|
JPanel queueListPanel = new JPanel(new GridBagLayout());
|
||||||
gbc.fill = GridBagConstraints.BOTH;
|
gbc.fill = GridBagConstraints.BOTH;
|
||||||
@ -459,27 +469,27 @@ public final class MainWindow implements Runnable, RipStatusHandler {
|
|||||||
configurationPanel.setBorder(emptyBorder);
|
configurationPanel.setBorder(emptyBorder);
|
||||||
configurationPanel.setVisible(false);
|
configurationPanel.setVisible(false);
|
||||||
// TODO Configuration components
|
// TODO Configuration components
|
||||||
configUpdateButton = new JButton("Check for updates");
|
configUpdateButton = new JButton(rb.getString("check.for.updates"));
|
||||||
configUpdateLabel = new JLabel("Current version: " + UpdateUtils.getThisJarVersion(), JLabel.RIGHT);
|
configUpdateLabel = new JLabel( rb.getString("current.version") + ": " + UpdateUtils.getThisJarVersion(), JLabel.RIGHT);
|
||||||
JLabel configThreadsLabel = new JLabel("Maximum download threads:", JLabel.RIGHT);
|
JLabel configThreadsLabel = new JLabel(rb.getString("max.download.threads") + ":", JLabel.RIGHT);
|
||||||
JLabel configTimeoutLabel = new JLabel("Timeout (in milliseconds):", JLabel.RIGHT);
|
JLabel configTimeoutLabel = new JLabel(rb.getString("timeout.mill"), JLabel.RIGHT);
|
||||||
JLabel configRetriesLabel = new JLabel("Retry download count:", JLabel.RIGHT);
|
JLabel configRetriesLabel = new JLabel(rb.getString("retry.download.count"), JLabel.RIGHT);
|
||||||
configThreadsText = new JTextField(Integer.toString(Utils.getConfigInteger("threads.size", 3)));
|
configThreadsText = new JTextField(Integer.toString(Utils.getConfigInteger("threads.size", 3)));
|
||||||
configTimeoutText = new JTextField(Integer.toString(Utils.getConfigInteger("download.timeout", 60000)));
|
configTimeoutText = new JTextField(Integer.toString(Utils.getConfigInteger("download.timeout", 60000)));
|
||||||
configRetriesText = new JTextField(Integer.toString(Utils.getConfigInteger("download.retries", 3)));
|
configRetriesText = new JTextField(Integer.toString(Utils.getConfigInteger("download.retries", 3)));
|
||||||
configOverwriteCheckbox = addNewCheckbox("Overwrite existing files?", "file.overwrite", false);
|
configOverwriteCheckbox = addNewCheckbox(rb.getString("overwrite.existing.files"), "file.overwrite", false);
|
||||||
configAutoupdateCheckbox = addNewCheckbox("Auto-update?", "auto.update", true);
|
configAutoupdateCheckbox = addNewCheckbox(rb.getString("auto.update"), "auto.update", true);
|
||||||
configPlaySound = addNewCheckbox("Sound when rip completes", "play.sound", false);
|
configPlaySound = addNewCheckbox(rb.getString("sound.when.rip.completes"), "play.sound", false);
|
||||||
configShowPopup = addNewCheckbox("Notification when rip starts", "download.show_popup", false);
|
configShowPopup = addNewCheckbox(rb.getString("notification.when.rip.starts"), "download.show_popup", false);
|
||||||
configSaveOrderCheckbox = addNewCheckbox("Preserve order", "download.save_order", true);
|
configSaveOrderCheckbox = addNewCheckbox(rb.getString("preserve.order"), "download.save_order", true);
|
||||||
configSaveLogs = addNewCheckbox("Save logs", "log.save", false);
|
configSaveLogs = addNewCheckbox(rb.getString("save.logs"), "log.save", false);
|
||||||
configSaveURLsOnly = addNewCheckbox("Save URLs only", "urls_only.save", false);
|
configSaveURLsOnly = addNewCheckbox(rb.getString("save.urls.only"), "urls_only.save", false);
|
||||||
configSaveAlbumTitles = addNewCheckbox("Save album titles", "album_titles.save", true);
|
configSaveAlbumTitles = addNewCheckbox(rb.getString("save.album.titles"), "album_titles.save", true);
|
||||||
configClipboardAutorip = addNewCheckbox("Autorip from Clipboard", "clipboard.autorip", false);
|
configClipboardAutorip = addNewCheckbox(rb.getString("autorip.from.clipboard"), "clipboard.autorip", false);
|
||||||
configSaveDescriptions = addNewCheckbox("Save descriptions", "descriptions.save", true);
|
configSaveDescriptions = addNewCheckbox(rb.getString("save.descriptions"), "descriptions.save", true);
|
||||||
configPreferMp4 = addNewCheckbox("Prefer MP4 over GIF","prefer.mp4", false);
|
configPreferMp4 = addNewCheckbox(rb.getString("prefer.mp4.over.gif"),"prefer.mp4", false);
|
||||||
configWindowPosition = addNewCheckbox("Restore window position", "window.position", true);
|
configWindowPosition = addNewCheckbox(rb.getString("restore.window.position"), "window.position", true);
|
||||||
configURLHistoryCheckbox = addNewCheckbox("Remember URL history", "remember.url_history", true);
|
configURLHistoryCheckbox = addNewCheckbox(rb.getString("remember.url.history"), "remember.url_history", true);
|
||||||
|
|
||||||
configLogLevelCombobox = new JComboBox(new String[] {"Log level: Error", "Log level: Warn", "Log level: Info", "Log level: Debug"});
|
configLogLevelCombobox = new JComboBox(new String[] {"Log level: Error", "Log level: Warn", "Log level: Info", "Log level: Debug"});
|
||||||
configLogLevelCombobox.setSelectedItem(Utils.getConfigString("log.level", "Log level: Debug"));
|
configLogLevelCombobox.setSelectedItem(Utils.getConfigString("log.level", "Log level: Debug"));
|
||||||
@@ -785,11 +795,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         queueListModel.addListDataListener(new ListDataListener() {
             @Override
             public void intervalAdded(ListDataEvent arg0) {
-                if (queueListModel.size() > 0) {
-                    optionQueue.setText("Queue (" + queueListModel.size() + ")");
-                } else {
-                    optionQueue.setText("Queue");
-                }
+                updateQueueLabel();
                 if (!isRipping) {
                     ripNextAlbum();
                 }
@@ -966,7 +972,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         HISTORY.clear();
         if (historyFile.exists()) {
             try {
-                logger.info("Loading history from " + historyFile.getCanonicalPath());
+                logger.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
                 HISTORY.fromFile(historyFile.getCanonicalPath());
             } catch (IOException e) {
                 logger.error("Failed to load history from file " + historyFile, e);
@@ -979,7 +985,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                         JOptionPane.ERROR_MESSAGE);
             }
         } else {
-            logger.info("Loading history from configuration");
+            logger.info(rb.getString("loading.history.from.configuration"));
             HISTORY.fromList(Utils.getConfigList("download.history"));
             if (HISTORY.toList().size() == 0) {
                 // Loaded from config, still no entries.
@@ -1025,17 +1031,13 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             return;
         }
         String nextAlbum = (String) queueListModel.remove(0);
-        if (queueListModel.isEmpty()) {
-            optionQueue.setText("Queue");
-        } else {
-            optionQueue.setText("Queue (" + queueListModel.size() + ")");
-        }
+        updateQueueLabel();
         Thread t = ripAlbum(nextAlbum);
         if (t == null) {
             try {
                 Thread.sleep(500);
             } catch (InterruptedException ie) {
-                logger.error("Interrupted while waiting to rip next album", ie);
+                logger.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
             }
             ripNextAlbum();
         } else {
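
Note: the updateQueueLabel() helper that replaces the removed blocks in the two hunks above is defined outside this excerpt. A plausible shape, inferred purely from the deleted label logic at its two call sites (not the actual definition from the commit):

    // Hypothetical reconstruction based on the removed code above.
    private void updateQueueLabel() {
        if (queueListModel.size() > 0) {
            optionQueue.setText("Queue (" + queueListModel.size() + ")");
        } else {
            optionQueue.setText("Queue");
        }
    }
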
@@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Utils;
 public class UpdateUtils {

     private static final Logger logger = Logger.getLogger(UpdateUtils.class);
-    private static final String DEFAULT_VERSION = "1.7.27";
+    private static final String DEFAULT_VERSION = "1.7.47";
     private static final String REPO_NAME = "ripmeapp/ripme";
     private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
     private static final String mainFileName = "ripme.jar";
src/main/java/com/rarchives/ripme/utils/Proxy.java (new file, 99 lines)
@@ -0,0 +1,99 @@
+package com.rarchives.ripme.utils;
+
+import java.net.Authenticator;
+import java.net.PasswordAuthentication;
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Proxy/Socks setter
+ */
+public class Proxy {
+    private Proxy() {
+    }
+
+    /**
+     * Parse the proxy server settings from string, using the format
+     * [user:password]@host[:port].
+     *
+     * @param fullproxy the string to parse
+     * @return HashMap containing proxy server, port, user and password
+     */
+    private static Map<String, String> parseServer(String fullproxy) {
+        Map<String, String> proxy = new HashMap<String, String>();
+
+        if (fullproxy.lastIndexOf("@") != -1) {
+            int sservli = fullproxy.lastIndexOf("@");
+            String userpw = fullproxy.substring(0, sservli);
+            String[] usersplit = userpw.split(":");
+            proxy.put("user", usersplit[0]);
+            proxy.put("password", usersplit[1]);
+            fullproxy = fullproxy.substring(sservli + 1);
+        }
+        String[] servsplit = fullproxy.split(":");
+        if (servsplit.length == 2) {
+            proxy.put("port", servsplit[1]);
+        }
+        proxy.put("server", servsplit[0]);
+        return proxy;
+    }
+
+    /**
+     * Set a HTTP Proxy.
+     * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless
+     * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java
+     * see https://stackoverflow.com/q/41505219
+     *
+     * @param fullproxy the proxy, using format [user:password]@host[:port]
+     */
+    public static void setHTTPProxy(String fullproxy) {
+        Map<String, String> proxyServer = parseServer(fullproxy);
+
+        if (proxyServer.get("user") != null && proxyServer.get("password") != null) {
+            Authenticator.setDefault(new Authenticator(){
+                protected PasswordAuthentication getPasswordAuthentication(){
+                    PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray());
+                    return p;
+                }
+            });
+            System.setProperty("http.proxyUser", proxyServer.get("user"));
+            System.setProperty("http.proxyPassword", proxyServer.get("password"));
+            System.setProperty("https.proxyUser", proxyServer.get("user"));
+            System.setProperty("https.proxyPassword", proxyServer.get("password"));
+        }
+
+        if (proxyServer.get("port") != null) {
+            System.setProperty("http.proxyPort", proxyServer.get("port"));
+            System.setProperty("https.proxyPort", proxyServer.get("port"));
+        }
+
+        System.setProperty("http.proxyHost", proxyServer.get("server"));
+        System.setProperty("https.proxyHost", proxyServer.get("server"));
+    }
+
+    /**
+     * Set a Socks Proxy Server (globally).
+     *
+     * @param fullsocks the socks server, using format [user:password]@host[:port]
+     */
+    public static void setSocks(String fullsocks) {
+
+        Map<String, String> socksServer = parseServer(fullsocks);
+        if (socksServer.get("user") != null && socksServer.get("password") != null) {
+            Authenticator.setDefault(new Authenticator(){
+                protected PasswordAuthentication getPasswordAuthentication(){
+                    PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray());
+                    return p;
+                }
+            });
+            System.setProperty("java.net.socks.username", socksServer.get("user"));
+            System.setProperty("java.net.socks.password", socksServer.get("password"));
+        }
+        if (socksServer.get("port") != null) {
+            System.setProperty("socksProxyPort", socksServer.get("port"));
+        }
+
+        System.setProperty("socksProxyHost", socksServer.get("server"));
+    }
+
+}
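
Note: a minimal sketch of how these setters are meant to be driven from configuration, mirroring the proxyTest cases further down (the key names proxy.http and proxy.socks come from those tests; the hosts and credentials here are made-up examples):

    // Apply a SOCKS or HTTP proxy at startup if the user configured one.
    String socks = Utils.getConfigString("proxy.socks", "");
    if (!socks.equals("")) {
        Proxy.setSocks(socks);        // e.g. "user:password@127.0.0.1:1080"
    }
    String http = Utils.getConfigString("proxy.http", "");
    if (!http.equals("")) {
        Proxy.setHTTPProxy(http);     // e.g. "proxy.example.com:8080" (no auth)
    }
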
@@ -9,19 +9,18 @@ import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

+import com.rarchives.ripme.ripper.AbstractRipper;
+import com.rarchives.ripme.ripper.rippers.EroShareRipper;
+import com.rarchives.ripme.ripper.rippers.EromeRipper;
+import com.rarchives.ripme.ripper.rippers.ImgurRipper;
+import com.rarchives.ripme.ripper.rippers.VidbleRipper;
+import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
 import org.apache.commons.lang.math.NumberUtils;
 import org.apache.log4j.Logger;
 import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;

-import com.rarchives.ripme.ripper.AbstractRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurImage;
-import com.rarchives.ripme.ripper.rippers.VidbleRipper;
-import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
-import com.rarchives.ripme.ripper.rippers.EroShareRipper;

 public class RipUtils {
     private static final Logger logger = Logger.getLogger(RipUtils.class);
@@ -35,8 +34,8 @@ public class RipUtils {
                 && url.toExternalForm().contains("imgur.com/a/")) {
             try {
                 logger.debug("Fetching imgur album at " + url);
-                ImgurAlbum imgurAlbum = ImgurRipper.getImgurAlbum(url);
-                for (ImgurImage imgurImage : imgurAlbum.images) {
+                ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurAlbum(url);
+                for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) {
                     logger.debug("Got imgur image: " + imgurImage.url);
                     result.add(imgurImage.url);
                 }
@@ -49,8 +48,8 @@ public class RipUtils {
             // Imgur image series.
             try {
                 logger.debug("Fetching imgur series at " + url);
-                ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url);
-                for (ImgurImage imgurImage : imgurAlbum.images) {
+                ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url);
+                for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) {
                     logger.debug("Got imgur image: " + imgurImage.url);
                     result.add(imgurImage.url);
                 }
@@ -91,6 +90,21 @@ public class RipUtils {
             return result;
         }
+        else if (url.toExternalForm().contains("erome.com")) {
+            try {
+                logger.info("Getting eroshare album " + url);
+                EromeRipper r = new EromeRipper(url);
+                Document tempDoc = r.getFirstPage();
+                for (String u : r.getURLsFromPage(tempDoc)) {
+                    result.add(new URL(u));
+                }
+            } catch (IOException e) {
+                // Do nothing
+                logger.warn("Exception while retrieving eroshare page:", e);
+            }
+            return result;
+        }

         Pattern p = Pattern.compile("https?://i.reddituploads.com/([a-zA-Z0-9]+)\\?.*");
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
@@ -122,8 +136,8 @@ public class RipUtils {
         try {
             // Fetch the page
             Document doc = Jsoup.connect(url.toExternalForm())
                     .userAgent(AbstractRipper.USER_AGENT)
                     .get();
             for (Element el : doc.select("meta")) {
                 if (el.attr("name").equals("twitter:image:src")) {
                     result.add(new URL(el.attr("content")));
src/main/java/com/rarchives/ripme/utils/UTF8Control.java (new file, 46 lines)
@@ -0,0 +1,46 @@
+package com.rarchives.ripme.utils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Locale;
+import java.util.PropertyResourceBundle;
+import java.util.ResourceBundle;
+
+// Code taken from https://stackoverflow.com/questions/4659929/how-to-use-utf-8-in-resource-properties-with-resourcebundle/4660195#4660195
+
+public class UTF8Control extends ResourceBundle.Control {
+    public ResourceBundle newBundle
+        (String baseName, Locale locale, String format, ClassLoader loader, boolean reload)
+            throws IllegalAccessException, InstantiationException, IOException
+    {
+        // The below is a copy of the default implementation.
+        String bundleName = toBundleName(baseName, locale);
+        String resourceName = toResourceName(bundleName, "properties");
+        ResourceBundle bundle = null;
+        InputStream stream = null;
+        if (reload) {
+            URL url = loader.getResource(resourceName);
+            if (url != null) {
+                URLConnection connection = url.openConnection();
+                if (connection != null) {
+                    connection.setUseCaches(false);
+                    stream = connection.getInputStream();
+                }
+            }
+        } else {
+            stream = loader.getResourceAsStream(resourceName);
+        }
+        if (stream != null) {
+            try {
+                // Only this line is changed to make it to read properties files as UTF-8.
+                bundle = new PropertyResourceBundle(new InputStreamReader(stream, "UTF-8"));
+            } finally {
+                stream.close();
+            }
+        }
+        return bundle;
+    }
+}
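
Note: the point of this override is the single InputStreamReader line. The default ResourceBundle.Control decodes .properties files as ISO-8859-1, which would mangle the accented strings in the bundles added below. Usage sketch:

    // Load the German bundle through the UTF-8-aware control; without it,
    // a value such as "geändert" would be decoded incorrectly.
    ResourceBundle rb = ResourceBundle.getBundle(
            "LabelsBundle", new Locale("de", "DE"), new UTF8Control());
    String label = rb.getString("modified");  // "geändert"
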
@@ -1,9 +1,6 @@
 package com.rarchives.ripme.utils;

-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
+import java.io.*;
 import java.lang.reflect.Constructor;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -92,9 +89,25 @@ public class Utils {
         return workingDir;
     }

+    /**
+     * Gets the value of a specific config key.
+     *
+     * @param key The name of the config parameter you want to find.
+     * @param defaultValue What the default value would be.
+     */
     public static String getConfigString(String key, String defaultValue) {
         return config.getString(key, defaultValue);
     }

+    public static String[] getConfigStringArray(String key) {
+        String[] s = config.getStringArray(key);
+        if (s.length == 0) {
+            return null;
+        } else {
+            return s;
+        }
+    }
+
     public static int getConfigInteger(String key, int defaultValue) {
         return config.getInt(key, defaultValue);
     }
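
Note: getConfigStringArray returns null rather than an empty array when the key is absent, so callers must null-check. A hypothetical call site (the key name nhentai.blacklist.tags is invented for illustration; the changelog only says nhentai tags can now be blacklisted):

    String[] blacklist = Utils.getConfigStringArray("nhentai.blacklist.tags");
    if (blacklist != null) {
        // e.g. hand the tags to a ripper's tag check, as NhentaiRipperTest does below
    }
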
@@ -135,31 +148,53 @@ public class Utils {
         }
     }

+    /**
+     * Determines if your current system is a Windows system.
+     */
     private static boolean isWindows() {
         return OS.contains("win");
     }

+    /**
+     * Determines if your current system is a Mac system
+     */
     private static boolean isMacOS() {
         return OS.contains("mac");
     }

+    /**
+     * Determines if current system is based on UNIX
+     */
     private static boolean isUnix() {
         return OS.contains("nix") || OS.contains("nux") || OS.contains("bsd");
     }

+    /**
+     * Gets the directory of where the config file is stored on a Windows machine.
+     */
     private static String getWindowsConfigDir() {
         return System.getenv("LOCALAPPDATA") + File.separator + "ripme";
     }

+
+    /**
+     * Gets the directory of where the config file is stored on a UNIX machine.
+     */
     private static String getUnixConfigDir() {
         return System.getProperty("user.home") + File.separator + ".config" + File.separator + "ripme";
     }

+    /**
+     * Gets the directory of where the config file is stored on a Mac machine.
+     */
     private static String getMacOSConfigDir() {
         return System.getProperty("user.home")
                 + File.separator + "Library" + File.separator + "Application Support" + File.separator + "ripme";
     }

+    /**
+     * Determines if the app is running in a portable mode. i.e. on a USB stick
+     */
     private static boolean portableMode() {
         try {
             File f = new File(new File(".").getCanonicalPath() + File.separator + configFile);
@@ -172,7 +207,9 @@ public class Utils {
         return false;
     }

+    /**
+     * Gets the directory of the config directory, for all systems.
+     */
     public static String getConfigDir() {
         if (portableMode()) {
             try {
@@ -192,17 +229,24 @@ public class Utils {
             return ".";
         }
     }
-    // Delete the url history file
+
+    /**
+     * Delete the url history file
+     */
     public static void clearURLHistory() {
         File file = new File(getURLHistoryFile());
         file.delete();
     }

-    // Return the path of the url history file
+    /**
+     * Return the path of the url history file
+     */
     public static String getURLHistoryFile() {
         return getConfigDir() + File.separator + "url_history.txt";
     }

+    /**
+     * Gets the path to the configuration file.
+     */
     private static String getConfigFilePath() {
         return getConfigDir() + File.separator + configFile;
     }
@@ -228,6 +272,15 @@ public class Utils {
         return prettySaveAs;
     }

+    /**
+     * Strips away URL parameters, which usually appear at the end of URLs.
+     * E.g. the ?query on PHP
+     *
+     * @param url The URL to filter/strip
+     * @param parameter The parameter to strip
+     *
+     * @return The stripped URL
+     */
     public static String stripURLParameter(String url, String parameter) {
         int paramIndex = url.indexOf("?" + parameter);
         boolean wasFirstParam = true;
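
Note: assuming the method behaves as its javadoc above describes, a call might look like this (the URL is illustrative):

    String stripped = Utils.stripURLParameter(
            "http://example.com/img.php?foo=1&bar=2", "foo");
    // expected: "http://example.com/img.php?bar=2"
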
@@ -255,6 +308,7 @@ public class Utils {
     /**
      * Removes the current working directory from a given filename
      * @param file
+     *      Path to the file
      * @return
      *      'file' without the leading current working directory
      */
@@ -338,9 +392,24 @@ public class Utils {
     }

     private static final int SHORTENED_PATH_LENGTH = 12;
+    /**
+     * Shortens the path to a file
+     * @param path
+     *      String of the path to the file
+     * @return
+     *      The simplified path to the file.
+     */
     public static String shortenPath(String path) {
         return shortenPath(new File(path));
     }

+    /**
+     * Shortens the path to a file
+     * @param file
+     *      File object that you want the shortened path of.
+     * @return
+     *      The simplified path to the file.
+     */
     public static String shortenPath(File file) {
         String path = removeCWD(file);
         if (path.length() < SHORTENED_PATH_LENGTH * 2) {
@@ -351,6 +420,13 @@ public class Utils {
                 + path.substring(path.length() - SHORTENED_PATH_LENGTH);
     }

+    /**
+     * Sanitizes a string so that a filesystem can handle it
+     * @param text
+     *      The text to be sanitized.
+     * @return
+     *      The sanitized text.
+     */
     public static String filesystemSanitized(String text) {
         text = text.replaceAll("[^a-zA-Z0-9.-]", "_");
         return text;
@@ -400,6 +476,13 @@ public class Utils {
         return original;
     }

+    /**
+     * Converts an integer into a human readable string
+     * @param bytes
+     *      Non-human readable integer.
+     * @return
+     *      Human readable interpretation of a byte.
+     */
     public static String bytesToHumanReadable(int bytes) {
         float fbytes = (float) bytes;
         String[] mags = new String[] {"", "K", "M", "G", "T"};
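
Note: the divide-by-1024 loop sits in the lines elided between this hunk and the next; taking the format string below at face value, the intended behaviour is roughly:

    Utils.bytesToHumanReadable(1536);      // "1.50KiB" (assumed)
    Utils.bytesToHumanReadable(1048576);   // "1.00MiB" (assumed)
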
@@ -411,6 +494,10 @@ public class Utils {
         return String.format("%.2f%siB", fbytes, mags[magIndex]);
     }

+    /**
+     * Gets and returns a list of all the album rippers present in the "com.rarchives.ripme.ripper.rippers" package.
+     * @return List<String> of all album rippers present.
+     */
     public static List<String> getListOfAlbumRippers() throws Exception {
         List<String> list = new ArrayList<>();
         for (Constructor<?> ripper : AbstractRipper.getRipperConstructors("com.rarchives.ripme.ripper.rippers")) {
@@ -418,6 +505,11 @@ public class Utils {
         }
         return list;
     }

+    /**
+     * Gets and returns a list of all video rippers present in the "com.rarchives.rime.rippers.video" package
+     * @return List<String> of all the video rippers.
+     */
     public static List<String> getListOfVideoRippers() throws Exception {
         List<String> list = new ArrayList<>();
         for (Constructor<?> ripper : AbstractRipper.getRipperConstructors("com.rarchives.ripme.ripper.rippers.video")) {
@@ -426,6 +518,11 @@ public class Utils {
         return list;
     }

+    /**
+     * Plays a sound from a file.
+     * @param filename
+     *      Path to the sound file
+     */
     public static void playSound(String filename) {
         URL resource = ClassLoader.getSystemClassLoader().getResource(filename);
         try {
@@ -563,6 +660,9 @@ public class Utils {
         cookieCache = new HashMap<String, HashMap<String, String>>();
     }

+    /**
+     * Gets all the cookies from a certain host
+     */
     public static Map<String, String> getCookies(String host) {
         HashMap<String, String> domainCookies = cookieCache.get(host);
         if (domainCookies == null) {
@@ -579,4 +679,25 @@ public class Utils {
         }
         return domainCookies;
     }
+
+    /**
+     * Gets the ResourceBundle AKA language package.
+     * Used for choosing the language of the UI.
+     *
+     * @return Returns the default resource bundle using the language specified in the config file.
+     */
+    public static ResourceBundle getResourceBundle() {
+        if (!getConfigString("lang", "").equals("")) {
+            String[] langCode = getConfigString("lang", "").split("_");
+            logger.info("Setting locale to " + getConfigString("lang", ""));
+            return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control());
+        }
+        try {
+            ResourceBundle rb = ResourceBundle.getBundle("LabelsBundle", Locale.getDefault(), new UTF8Control());
+            return rb;
+        } catch (MissingResourceException e) {
+            ResourceBundle rb = ResourceBundle.getBundle("LabelsBundle", Locale.ROOT);
+            return rb;
+        }
+    }
 }
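
Note: this method is what feeds the rb.getString(...) calls added to MainWindow above. A minimal sketch, assuming a rip.properties entry lang = es_ES (any of the locales shipped below would do):

    ResourceBundle rb = Utils.getResourceBundle();
    rb.getString("save.logs");   // "Guardar logs" with the es_ES bundle below
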
src/main/resources/LabelsBundle.properties (new file, 37 lines)
@@ -0,0 +1,37 @@
+Log = Log
+History = History
+created = created
+modified = modified
+Queue = Queue
+Configuration = Configuration
+
+# Keys for the Configuration menu
+
+current.version = Current version
+check.for.updates = Check for updates
+auto.update = Auto-update?
+max.download.threads = Maximum download threads
+timeout.mill = Timeout (in milliseconds):
+retry.download.count = Retry download count
+overwrite.existing.files = Overwrite existing files?
+sound.when.rip.completes = Sound when rip completes
+preserve.order = Preserve order
+save.logs = Save logs
+notification.when.rip.starts = Notification when rip starts
+save.urls.only = Save URLs only
+save.album.titles = Save album titles
+autorip.from.clipboard = Autorip from Clipboard
+save.descriptions = Save descriptions
+prefer.mp4.over.gif = Prefer MP4 over GIF
+restore.window.position = Restore window position
+remember.url.history = Remember URL history
+loading.history.from = Loading history from
+
+# Misc UI keys
+
+loading.history.from.configuration = Loading history from configuration
+interrupted.while.waiting.to.rip.next.album = Interrupted while waiting to rip next album
+inactive = Inactive
+re-rip.checked = Re-rip Checked
+remove = Remove
+clear = Clear
src/main/resources/LabelsBundle_de_DE.properties (new file, 38 lines)
@@ -0,0 +1,38 @@
+Log = Log
+History = Verlauf
+created = erstellt
+modified = geändert
+Queue = Queue
+Configuration = Konfiguration
+
+# Keys for the Configuration menu
+
+current.version = Aktuelle Version
+check.for.updates = Suche nach Aktualisierungen
+auto.update = Automatisch Aktualisieren?
+max.download.threads = Maximum download threads
+timeout.mill = Timeout (in milliseconds):
+retry.download.count = Anzahl der Downloadversuche
+overwrite.existing.files = Überschreibe bereits existierende Dateien?
+sound.when.rip.completes = Ton abspielen bei fertigem Download
+preserve.order = Reihenfolge beibehalten
+save.logs = Speichere Logs
+notification.when.rip.starts = Benachrichtigung wenn Download startet
+save.urls.only = Speicher nur URLs
+save.album.titles = Speichere Albumtitels
+autorip.from.clipboard = Automatisch Downloaden von der Zwischenablage
+save.descriptions = Speichere Beschreibungen
+prefer.mp4.over.gif = Bevorzuge MP4 über GIF
+restore.window.position = Wieder herstellen der Fensterposition
+remember.url.history = Erinnere URL Verlauf
+loading.history.from = Lade Verlauf von
+
+# Misc UI keys
+
+loading.history.from.configuration = Lade Verlauf aus Konfiguration
+interrupted.while.waiting.to.rip.next.album = Unterbrochen während Download des nächsten Albums
+inactive = Inaktiv
+re-rip.checked = Re-rip Überprüft
+remove = Entfernen
+clear = Leeren
+
src/main/resources/LabelsBundle_es_ES.properties (new file, 37 lines)
@@ -0,0 +1,37 @@
+Log = Log
+History = Historia
+created = creado
+modified = modificado
+Queue = Cola
+Configuration = Configuracion
+
+# Keys for the Configuration menu
+
+current.version = Version Actual
+check.for.updates = Buscar actualizaciones
+auto.update = Auto-actualizar?
+max.download.threads = Maximos procesos de descarga
+timeout.mill = Timeout (in milliseconds):
+retry.download.count = Numero de reintentos de descarga
+overwrite.existing.files = Sobreescribir archivos existentes?
+sound.when.rip.completes = Sonar cuando el Rip termina
+preserve.order = Mantener orden
+save.logs = Guardar logs
+notification.when.rip.starts = Notificar cuando el Rip comienza
+save.urls.only = Guardar solamente URLs
+save.album.titles = Guardar titulos de albunes
+autorip.from.clipboard = Autorip desde Portapapeles
+save.descriptions = Guardar descripciones
+prefer.mp4.over.gif = Preferir MP4 sobre GIF
+restore.window.position = Restaurar posicion de ventana
+remember.url.history = Recordar historia URL
+loading.history.from = Cargando historia desde
+
+# Misc UI keys
+
+loading.history.from.configuration = Cargando historia desde la configuracion
+interrupted.while.waiting.to.rip.next.album = Interrumpido esperando el Rip del proximo album
+inactive = Inactivo
+re-rip.checked = Re-rip marcado
+remove = Quitar
+clear = Limpiar
src/main/resources/LabelsBundle_fr_CH.properties (new file, 37 lines)
@@ -0,0 +1,37 @@
+Log = Journal
+History = Historique
+created = créé le
+modified = modifié le
+Queue = File d'attente
+Configuration = Configuration
+
+# Keys for the Configuration menu
+
+current.version = Version actuelle
+check.for.updates = Vérifier mises à jour
+auto.update = Mises à jour automatiques?
+max.download.threads = Nombre de téléchargements parallèles maximum
+timeout.mill = Délai d'expiration (en millisecondes):
+retry.download.count = Nombre d'essais téléchargement
+overwrite.existing.files = Remplacer fichiers existants ?
+sound.when.rip.completes = Son lorsque le rip est terminé
+preserve.order = Conserver l'ordre
+save.logs = Enregistrer journaux
+notification.when.rip.starts = Notification lorsqu'un rip commence
+save.urls.only = Enregistrer URL uniquement
+save.album.titles = Enregistrer titres d'album
+autorip.from.clipboard = Autorip depuis presse-papier
+save.descriptions = Enregistrer descriptions
+prefer.mp4.over.gif = Préférer MP4 à GIF
+restore.window.position = Restaurer la position de la fenêtre
+remember.url.history = Se souvenir de l'historique des URL
+loading.history.from = Charger l'historique depuis
+
+# Misc UI keys
+
+loading.history.from.configuration = Charger l'historique depuis la configuration
+interrupted.while.waiting.to.rip.next.album = Interrompu lors de l'attente pour ripper le prochain album
+inactive = Inactif
+re-rip.checked = Re-rip vérifié
+remove = Enlever
+clear = Effacer
src/main/resources/LabelsBundle_pt_PT.properties (new file, 37 lines)
@@ -0,0 +1,37 @@
+Log = Registo
+History = Histórico
+created = criado
+modified = modificado
+Queue = Fila
+Configuration = Configuração
+
+# Keys for the Configuration menu
+
+current.version = Versão atual
+check.for.updates = Verificar atualizações
+auto.update = Atualização automática?
+max.download.threads = Número máximo de processos de transferência
+timeout.mill = Timeout (em milissegundos):
+retry.download.count = Número de novas tentativas de transferência
+overwrite.existing.files = Sobrescrever ficheiros existentes?
+sound.when.rip.completes = Notificar quando o rip é concluído
+preserve.order = Manter a ordem
+save.logs = Guardar registos
+notification.when.rip.starts = Notificar quando o rip começar
+save.urls.only = Apenas guardar URLs
+save.album.titles = Guardar os títulos de álbuns
+autorip.from.clipboard = Autorip da área de transferência
+save.descriptions = Guardar descrições
+prefer.mp4.over.gif = Preferir MP4 a GIF
+restore.window.position = Restaurar posição da janela
+remember.url.history = Lembrar histórico de URL
+loading.history.from = Carregar histórico de
+
+# Misc UI keys
+
+loading.history.from.configuration = A carregar o histórico da configuração
+interrupted.while.waiting.to.rip.next.album = Interrompido durante a espera do rip do próximo álbum
+inactive = Inativo
+re-rip.checked = Re-rip verificado
+remove = Remover
+clear = Limpar
AbstractRipperTest.java (new file)
@@ -0,0 +1,30 @@
+package com.rarchives.ripme.tst;
+
+import com.rarchives.ripme.ripper.AbstractRipper;
+import junit.framework.TestCase;
+
+import java.io.IOException;
+import java.net.URL;
+
+
+
+public class AbstractRipperTest extends TestCase {
+
+    public void testGetFileName() throws IOException {
+        String fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", "test");
+        assertEquals("test.test", fileName);
+
+        fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", null);
+        assertEquals("test", fileName);
+
+        fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), null, null);
+        assertEquals("Object", fileName);
+
+        fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file.png"), null, null);
+        assertEquals("file.png", fileName);
+
+        fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file."), null, null);
+        assertEquals("file.", fileName);
+    }
+
+}
src/test/java/com/rarchives/ripme/tst/proxyTest.java (new file, 52 lines)
@@ -0,0 +1,52 @@
+package com.rarchives.ripme.tst;
+
+import java.io.IOException;
+import java.net.URL;
+import com.rarchives.ripme.utils.Proxy;
+import com.rarchives.ripme.utils.Utils;
+import junit.framework.TestCase;
+import com.rarchives.ripme.utils.Http;
+
+
+public class proxyTest extends TestCase {
+
+
+    // This test will only run on machines where the user has added a entry for proxy.socks
+    public void testSocksProxy() throws IOException {
+        // Unset proxy before testing
+        System.setProperty("http.proxyHost", "");
+        System.setProperty("https.proxyHost", "");
+        System.setProperty("socksProxyHost", "");
+        URL url = new URL("https://icanhazip.com");
+        String proxyConfig = Utils.getConfigString("proxy.socks", "");
+        if (!proxyConfig.equals("")) {
+            String ip1 = Http.url(url).ignoreContentType().get().text();
+            Proxy.setSocks(Utils.getConfigString("proxy.socks", ""));
+            String ip2 = Http.url(url).ignoreContentType().get().text();
+            assertFalse(ip1.equals(ip2));
+        } else {
+            System.out.println("Skipping testSocksProxy");
+            assert(true);
+        }
+    }
+
+    // This test will only run on machines where the user has added a entry for proxy.http
+    public void testHTTPProxy() throws IOException {
+        // Unset proxy before testing
+        System.setProperty("http.proxyHost", "");
+        System.setProperty("https.proxyHost", "");
+        System.setProperty("socksProxyHost", "");
+        URL url = new URL("https://icanhazip.com");
+        String proxyConfig = Utils.getConfigString("proxy.http", "");
+        if (!proxyConfig.equals("")) {
+            String ip1 = Http.url(url).ignoreContentType().get().text();
+            Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", ""));
+            String ip2 = Http.url(url).ignoreContentType().get().text();
+            assertFalse(ip1.equals(ip2));
+        } else {
+            System.out.println("Skipping testHTTPProxy");
+            assert(true);
+        }
+    }
+
+}
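
Note: both tests key off plain rip.properties entries; example values (made-up endpoints) in the [user:password]@host[:port] format documented in Proxy.java above:

    # rip.properties (example values only)
    proxy.socks = user:password@127.0.0.1:1080
    proxy.http = 127.0.0.1:8118
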
@@ -3,7 +3,7 @@ package com.rarchives.ripme.tst.ripper.rippers;
 import java.io.IOException;
 import java.net.URL;

-import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;;
+import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;

 public class AerisdiesRipperTest extends RippersTest {
     public void testAlbum() throws IOException {
BatoRipperTest.java (new file)
@@ -0,0 +1,25 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.BatoRipper;
+
+public class BatoRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        BatoRipper ripper = new BatoRipper(new URL("https://bato.to/chapter/1207152"));
+        testRipper(ripper);
+    }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://bato.to/chapter/1207152");
+        BatoRipper ripper = new BatoRipper(url);
+        assertEquals("1207152", ripper.getGID(url));
+    }
+
+    public void testGetAlbumTitle() throws IOException {
+        URL url = new URL("https://bato.to/chapter/1207152");
+        BatoRipper ripper = new BatoRipper(url);
+        assertEquals("bato_1207152_I_Messed_Up_by_Teaching_at_a_Black_Gyaru_School!_Ch.2", ripper.getAlbumTitle(url));
+    }
+}
BlackbrickroadofozRipperTest.java (new file)
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import com.rarchives.ripme.ripper.rippers.BlackbrickroadofozRipper;
+
+import java.io.IOException;
+import java.net.URL;
+
+public class BlackbrickroadofozRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        BlackbrickroadofozRipper ripper = new BlackbrickroadofozRipper(new URL("http://www.blackbrickroadofoz.com/comic/beginning"));
+        testRipper(ripper);
+    }
+}
@@ -16,4 +16,10 @@ public class DeviantartRipperTest extends RippersTest {
         DeviantartRipper ripper = new DeviantartRipper(new URL("http://faterkcx.deviantart.com/gallery/"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://airgee.deviantart.com/gallery/");
+        DeviantartRipper ripper = new DeviantartRipper(url);
+        assertEquals("airgee", ripper.getGID(url));
+    }
 }
DynastyscansRipperTest.java (new file)
@@ -0,0 +1,18 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.DynastyscansRipper;
+
+public class DynastyscansRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
+        testRipper(ripper);
+    }
+
+    public void testGetGID() throws IOException {
+        DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
+        assertEquals("under_one_roof_ch01", ripper.getGID(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")));
+    }
+}
@@ -17,4 +17,9 @@ public class EightmusesRipperTest extends RippersTest {
         ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor"));
         testRipper(ripper);
     }
+
+    public void testGID() throws IOException {
+        EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"));
+        assertEquals("Affect3D-Comics", ripper.getGID(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")));
+    }
 }
@@ -10,4 +10,10 @@ public class FuraffinityRipperTest extends RippersTest {
         FuraffinityRipper ripper = new FuraffinityRipper(new URL("https://www.furaffinity.net/gallery/mustardgas/"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://www.furaffinity.net/gallery/mustardgas/");
+        FuraffinityRipper ripper = new FuraffinityRipper(url);
+        assertEquals("mustardgas", ripper.getGID(url));
+    }
 }
GfycatporntubeRipperTest.java (new file)
@@ -0,0 +1,19 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.GfycatporntubeRipper;
+
+public class GfycatporntubeRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/"));
+        testRipper(ripper);
+    }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/");
+        GfycatporntubeRipper ripper = new GfycatporntubeRipper(url);
+        assertEquals("blowjob-bunny-puts-on-a-show", ripper.getGID(url));
+    }
+}
@@ -7,7 +7,7 @@ import com.rarchives.ripme.ripper.rippers.Hentai2readRipper;

 public class Hentai2readRipperTest extends RippersTest {
     public void testHentai2readAlbum() throws IOException {
-        Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/"));
+        Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/1/"));
         testRipper(ripper);
     }
 }
HitomiRipperTest.java (new file)
@@ -0,0 +1,14 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.HitomiRipper;
+
+public class HitomiRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        HitomiRipper ripper = new HitomiRipper(new URL("https://hitomi.la/galleries/975973.html"));
+        testRipper(ripper);
+        assertTrue(ripper.getGID(new URL("https://hitomi.la/galleries/975973.html")).equals("975973"));
+    }
+}
HypnohubRipperTest.java (new file)
@@ -0,0 +1,25 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.HypnohubRipper;
+
+public class HypnohubRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
+        URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
+        HypnohubRipper ripper = new HypnohubRipper(poolURL);
+        testRipper(ripper);
+        ripper = new HypnohubRipper(postURL);
+        testRipper(ripper);
+    }
+    public void testGetGID() throws IOException {
+        URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
+        HypnohubRipper ripper = new HypnohubRipper(poolURL);
+        assertEquals("2303", ripper.getGID(poolURL));
+
+        URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
+        assertEquals("63464_black_hair-bracelet-collar-corruption-female_only-", ripper.getGID(postURL));
+    }
+}
@@ -10,4 +10,10 @@ public class ImagevenueRipperTest extends RippersTest {
         ImagevenueRipper ripper = new ImagevenueRipper(new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo");
+        ImagevenueRipper ripper = new ImagevenueRipper(url);
+        assertEquals("gallery_1373818527696_191lo", ripper.getGID(url));
+    }
 }
|
@ -10,4 +10,10 @@ public class ImgboxRipperTest extends RippersTest {
|
|||||||
ImgboxRipper ripper = new ImgboxRipper(new URL("https://imgbox.com/g/FJPF7t26FD"));
|
ImgboxRipper ripper = new ImgboxRipper(new URL("https://imgbox.com/g/FJPF7t26FD"));
|
||||||
testRipper(ripper);
|
testRipper(ripper);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testGetGID() throws IOException {
|
||||||
|
URL url = new URL("https://imgbox.com/g/FJPF7t26FD");
|
||||||
|
ImgboxRipper ripper = new ImgboxRipper(url);
|
||||||
|
assertEquals("FJPF7t26FD", ripper.getGID(url));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,13 +1,13 @@
|
|||||||
package com.rarchives.ripme.tst.ripper.rippers;
|
package com.rarchives.ripme.tst.ripper.rippers;
|
||||||
|
|
||||||
|
import com.rarchives.ripme.ripper.rippers.ImgurRipper;
|
||||||
|
import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.net.URL;
|
import java.net.URL;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
import com.rarchives.ripme.ripper.rippers.ImgurRipper;
|
|
||||||
import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
|
|
||||||
|
|
||||||
public class ImgurRipperTest extends RippersTest {
|
public class ImgurRipperTest extends RippersTest {
|
||||||
|
|
||||||
public void testImgurURLFailures() throws IOException {
|
public void testImgurURLFailures() throws IOException {
|
||||||
@ -17,7 +17,6 @@ public class ImgurRipperTest extends RippersTest {
|
|||||||
failURLs.add(new URL("http://imgur.com/"));
|
failURLs.add(new URL("http://imgur.com/"));
|
||||||
failURLs.add(new URL("http://i.imgur.com"));
|
failURLs.add(new URL("http://i.imgur.com"));
|
||||||
failURLs.add(new URL("http://i.imgur.com/"));
|
failURLs.add(new URL("http://i.imgur.com/"));
|
||||||
failURLs.add(new URL("http://imgur.com/image"));
|
|
||||||
failURLs.add(new URL("http://imgur.com/image.jpg"));
|
failURLs.add(new URL("http://imgur.com/image.jpg"));
|
||||||
failURLs.add(new URL("http://i.imgur.com/image.jpg"));
|
failURLs.add(new URL("http://i.imgur.com/image.jpg"));
|
||||||
for (URL url : failURLs) {
|
for (URL url : failURLs) {
|
||||||
@ -50,6 +49,15 @@ public class ImgurRipperTest extends RippersTest {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testImgurSingleImage() throws IOException {
|
||||||
|
List<URL> contentURLs = new ArrayList<>();
|
||||||
|
contentURLs.add(new URL("http://imgur.com/qbfcLyG")); // Single image URL
|
||||||
|
contentURLs.add(new URL("https://imgur.com/KexUO")); // Single image URL
|
||||||
|
for (URL url : contentURLs) {
|
||||||
|
ImgurRipper ripper = new ImgurRipper(url);
|
||||||
|
testRipper(ripper);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public void testImgurAlbumWithMoreThan20Pictures() throws IOException {
|
public void testImgurAlbumWithMoreThan20Pictures() throws IOException {
|
||||||
ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/HUMsq"));
|
ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/HUMsq"));
|
||||||
|
ManganeloRipperTest.java (new file)
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.ManganeloRipper;
+
+public class ManganeloRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        ManganeloRipper ripper = new ManganeloRipper(new URL("http://manganelo.com/manga/black_clover"));
+        testRipper(ripper);
+    }
+}
@@ -10,4 +10,9 @@ public class ModelmayhemRipperTest extends RippersTest {
         ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
+        assertEquals("520206", ripper.getGID(new URL("https://www.modelmayhem.com/portfolio/520206/viewall")));
+    }
 }
ModelxRipperTest.java (new file)
@@ -0,0 +1,13 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.ModelxRipper;
+
+public class ModelxRipperTest extends RippersTest {
+    public void testModelxAlbum() throws IOException {
+        ModelxRipper ripper = new ModelxRipper(new URL("http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/"));
+        testRipper(ripper);
+    }
+}
src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java
@@ -10,4 +10,15 @@ public class MyhentaicomicsRipperTest extends RippersTest {
         MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales");
+        MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url);
+        // Test a comic
+        assertEquals("Nienna-Lost-Tales", ripper.getGID(url));
+        // Test a search
+        assertEquals("test", ripper.getGID(new URL("http://myhentaicomics.com/index.php/search?q=test")));
+        // Test a tag
+        assertEquals("2409", ripper.getGID(new URL("http://myhentaicomics.com/index.php/tag/2409/")));
+    }
 }
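The three assertions above pin down how getGID must normalize each URL shape this ripper accepts: a comic page, a search, and a numeric tag. Purely for illustration, a regex-based getGID satisfying exactly these assertions might look like the sketch below; the patterns are reverse-engineered from the test URLs, not copied from the committed ripper.

// Hypothetical sketch reverse-engineered from the assertions above;
// the real MyhentaicomicsRipper.getGID may differ.
import java.net.MalformedURLException;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MyhentaicomicsGidSketch {

    public static String getGID(URL url) throws MalformedURLException {
        String u = url.toExternalForm();
        // Search: http://myhentaicomics.com/index.php/search?q=test -> "test"
        Matcher m = Pattern.compile("https?://myhentaicomics\\.com/index\\.php/search\\?q=([a-zA-Z0-9_-]+).*").matcher(u);
        if (m.matches()) {
            return m.group(1);
        }
        // Tag: http://myhentaicomics.com/index.php/tag/2409/ -> "2409"
        m = Pattern.compile("https?://myhentaicomics\\.com/index\\.php/tag/([0-9]+)/?").matcher(u);
        if (m.matches()) {
            return m.group(1);
        }
        // Comic: http://myhentaicomics.com/index.php/Nienna-Lost-Tales -> "Nienna-Lost-Tales"
        m = Pattern.compile("https?://myhentaicomics\\.com/index\\.php/([a-zA-Z0-9_-]+)/?").matcher(u);
        if (m.matches()) {
            return m.group(1);
        }
        throw new MalformedURLException("Expected a myhentaicomics.com comic, search, or tag URL, got " + url);
    }
}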
src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java (new file)
@@ -0,0 +1,33 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.NhentaiRipper;
+
+public class NhentaiRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/"));
+        testRipper(ripper);
+    }
+
+    public void testGetGID() throws IOException {
+        NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/"));
+        assertEquals("233295", ripper.getGID(new URL("https://nhentai.net/g/233295/")));
+    }
+
+    // Test the tag blacklisting
+    public void testTagBlackList() throws IOException {
+        URL url = new URL("https://nhentai.net/g/233295/");
+        NhentaiRipper ripper = new NhentaiRipper(url);
+        // Test multiple blacklisted tags
+        String[] tags = {"test", "one", "blowjob"};
+        String blacklistedTag = ripper.checkTags(ripper.getFirstPage(), tags);
+        assertEquals("blowjob", blacklistedTag);
+
+        // Test tags with spaces in them
+        String[] tags2 = {"test", "one", "sole female"};
+        blacklistedTag = ripper.checkTags(ripper.getFirstPage(), tags2);
+        assertEquals("sole female", blacklistedTag);
+    }
+}
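testTagBlackList exercises the new nhentai tag blacklist and fixes its contract: checkTags(page, blacklist) returns the first blacklisted tag found on the gallery page (and, presumably, null when none match). As a rough illustration only -- the CSS selector and tag markup here are guesses, not nhentai's real structure -- such a helper could be written with Jsoup like this:

// Hypothetical checkTags sketch. Only the contract (return the first
// blacklisted tag found, else null) is taken from the test above; the
// selector and markup are assumptions.
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class TagBlacklistSketch {

    public static String checkTags(Document page, String[] blacklistedTags) {
        // Assume each tag renders as <a class="tag"><span class="name">...</span></a>
        for (Element tag : page.select("a.tag span.name")) {
            String name = tag.text().trim().toLowerCase();
            for (String blacklisted : blacklistedTags) {
                if (name.equals(blacklisted.trim().toLowerCase())) {
                    return blacklisted; // first hit wins, e.g. "blowjob"
                }
            }
        }
        return null; // no blacklisted tag on this gallery
    }
}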
src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java (new file)
@@ -0,0 +1,18 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.NudeGalsRipper;
+
+public class NudeGalsRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
+        testRipper(ripper);
+    }
+
+    public void testGetGID() throws IOException {
+        NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
+        assertEquals("5541", ripper.getGID(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")));
+    }
+}
src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java
@@ -10,4 +10,10 @@ public class PornhubRipperTest extends RippersTest {
         PornhubRipper ripper = new PornhubRipper(new URL("https://www.pornhub.com/album/15680522"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://www.pornhub.com/album/15680522");
+        PornhubRipper ripper = new PornhubRipper(url);
+        assertEquals("15680522", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java (new file)
@@ -0,0 +1,20 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.Rule34Ripper;
+
+public class Rule34RipperTest extends RippersTest {
+    public void testRule34Rip() throws IOException {
+        Rule34Ripper ripper = new Rule34Ripper(new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo"));
+        testRipper(ripper);
+    }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo");
+        Rule34Ripper ripper = new Rule34Ripper(url);
+        assertEquals("bimbo", ripper.getGID(url));
+    }
+
+}
src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java
@@ -17,4 +17,15 @@ public class SankakuComplexRipperTest extends RippersTest {
         testRipper(ripper);
     }
     */
+    public void testgetGID() throws IOException {
+        URL url = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29");
+        SankakuComplexRipper ripper = new SankakuComplexRipper(url);
+        assertEquals("idol._meme_(me!me!me!)_(cosplay)", ripper.getGID(url));
+    }
+
+    public void testgetSubDomain() throws IOException {
+        URL url = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29");
+        SankakuComplexRipper ripper = new SankakuComplexRipper(url);
+        assertEquals("idol.", ripper.getSubDomain(url));
+    }
 }
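testgetSubDomain pins down a slightly unusual convention: the subdomain comes back with its trailing dot ("idol."), presumably so callers can prepend it directly to the base domain when building per-subdomain URLs (the 1.7.47 changelog notes SankakuComplexRipper now downloads from different subdomains). A host-splitting helper honoring that convention might look like this -- a sketch, not the committed code:

// Hypothetical getSubDomain sketch; keeps the trailing dot so the result
// can be concatenated straight onto "sankakucomplex.com", matching the
// "idol." assertion above.
import java.net.URL;

public class SubDomainSketch {

    public static String getSubDomain(URL url) {
        String host = url.getHost();           // e.g. "idol.sankakucomplex.com"
        String base = "sankakucomplex.com";
        if (host.equals(base)) {
            return "";                         // no subdomain at all
        }
        if (host.endsWith("." + base)) {
            // Strip the registered domain, keeping everything up to and
            // including the separating dot: "idol.sankakucomplex.com" -> "idol."
            return host.substring(0, host.length() - base.length());
        }
        return "";
    }
}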
src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java
@@ -13,4 +13,10 @@ public class ShesFreakyRipperTest extends RippersTest {
         testRipper(ripper);
     }
     */
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html");
+        ShesFreakyRipper ripper = new ShesFreakyRipper(url);
+        assertEquals("nicee-snow-bunny-579NbPjUcYa", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java
@@ -10,4 +10,10 @@ public class SinfestRipperTest extends RippersTest {
         SinfestRipper ripper = new SinfestRipper(new URL("http://sinfest.net/view.php?date=2000-01-17"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://sinfest.net/view.php?date=2000-01-17");
+        SinfestRipper ripper = new SinfestRipper(url);
+        assertEquals("2000-01-17", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinnercomicsRipperTest.java
@@ -10,4 +10,10 @@ public class SinnercomicsRipperTest extends RippersTest {
         SinnercomicsRipper ripper = new SinnercomicsRipper(new URL("https://sinnercomics.com/comic/beyond-the-hotel-page-01/"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://sinnercomics.com/comic/beyond-the-hotel-page-01/");
+        SinnercomicsRipper ripper = new SinnercomicsRipper(url);
+        assertEquals("beyond-the-hotel", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java (new file)
@@ -0,0 +1,19 @@
+package com.rarchives.ripme.tst.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+import com.rarchives.ripme.ripper.rippers.SmuttyRipper;
+
+public class SmuttyRipperTest extends RippersTest {
+    public void testRip() throws IOException {
+        SmuttyRipper ripper = new SmuttyRipper(new URL("https://smutty.com/user/QUIGON/"));
+        testRipper(ripper);
+    }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://smutty.com/user/QUIGON/");
+        SmuttyRipper ripper = new SmuttyRipper(url);
+        assertEquals("QUIGON", ripper.getGID(url));
+    }
+}
src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java
@@ -10,4 +10,10 @@ public class StaRipperTest extends RippersTest {
         StaRipper ripper = new StaRipper(new URL("https://sta.sh/2hn9rtavr1g"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://sta.sh/2hn9rtavr1g");
+        StaRipper ripper = new StaRipper(url);
+        assertEquals("2hn9rtavr1g", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java
@@ -10,4 +10,10 @@ public class TapasticRipperTest extends RippersTest {
         TapasticRipper ripper = new TapasticRipper(new URL("https://tapas.io/series/tsiwbakd-comic"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://tapas.io/series/tsiwbakd-comic");
+        TapasticRipper ripper = new TapasticRipper(url);
+        assertEquals("series_ tsiwbakd-comic", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java
@@ -10,4 +10,10 @@ public class TeenplanetRipperTest extends RippersTest {
         TeenplanetRipper ripper = new TeenplanetRipper(new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html");
+        TeenplanetRipper ripper = new TeenplanetRipper(url);
+        assertEquals("the-perfect-side-of-me-6588", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java
@@ -10,4 +10,10 @@ public class TheyiffgalleryRipperTest extends RippersTest {
         TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(new URL("https://theyiffgallery.com/index?/category/4303"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://theyiffgallery.com/index?/category/4303");
+        TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(url);
+        assertEquals("4303", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java
@@ -10,5 +10,11 @@ public class VidbleRipperTest extends RippersTest {
         VidbleRipper ripper = new VidbleRipper(new URL("http://www.vidble.com/album/y1oyh3zd"));
         testRipper(ripper);
     }
 
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://www.vidble.com/album/y1oyh3zd");
+        VidbleRipper ripper = new VidbleRipper(url);
+        assertEquals("y1oyh3zd", ripper.getGID(url));
+    }
+
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java
@@ -38,14 +38,17 @@ public class VideoRippersTest extends RippersTest {
         }
     }
 
-    public void testTwitchVideoRipper() throws IOException {
-        List<URL> contentURLs = new ArrayList<>();
-        contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull"));
-        for (URL url : contentURLs) {
-            TwitchVideoRipper ripper = new TwitchVideoRipper(url);
-            videoTestHelper(ripper);
-        }
-    }
+    // Test disabled. See https://github.com/RipMeApp/ripme/issues/574
+    // public void testTwitchVideoRipper() throws IOException {
+    //     List<URL> contentURLs = new ArrayList<>();
+    //     contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull"));
+    //     for (URL url : contentURLs) {
+    //         TwitchVideoRipper ripper = new TwitchVideoRipper(url);
+    //         videoTestHelper(ripper);
+    //     }
+    // }
 
     public void testXhamsterRipper() throws IOException {
         List<URL> contentURLs = new ArrayList<>();
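These test classes follow JUnit 3 conventions (RippersTest ultimately extends TestCase, and tests are discovered by the public void test*() naming rule), so commenting the body out is one way to disable it. Assuming that naming rule really is what drives discovery here, an easier-to-revert alternative would be renaming the method so it no longer matches -- sketched below with the hypothetical name disabledTestTwitchVideoRipper:

// Hypothetical alternative inside VideoRippersTest: under JUnit 3, a method
// whose name does not start with "test" is never picked up by the runner,
// so a rename disables the test without touching its body.
public void disabledTestTwitchVideoRipper() throws IOException {
    // Disabled. See https://github.com/RipMeApp/ripme/issues/574
    List<URL> contentURLs = new ArrayList<>();
    contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull"));
    for (URL url : contentURLs) {
        TwitchVideoRipper ripper = new TwitchVideoRipper(url);
        videoTestHelper(ripper);
    }
}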
src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineRipperTest.java (deleted)
@@ -1,16 +0,0 @@
-package com.rarchives.ripme.tst.ripper.rippers;
-
-import java.io.IOException;
-import java.net.URL;
-
-import com.rarchives.ripme.ripper.rippers.VineRipper;
-
-public class VineRipperTest extends RippersTest {
-    // https://github.com/RipMeApp/ripme/issues/181
-    /*
-    public void testVineRip() throws IOException {
-        VineRipper ripper = new VineRipper(new URL("https://vine.co/u/954440445776334848"));
-        testRipper(ripper);
-    }
-    */
-}
src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java
@@ -15,4 +15,10 @@ public class WebtoonsRipperTest extends RippersTest {
         WebtoonsRipper ripper = new WebtoonsRipper(new URL("http://www.webtoons.com/en/drama/lookism/ep-145/viewer?title_no=1049&episode_no=145"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://www.webtoons.com/en/drama/my-boo/ep-33/viewer?title_no=1185&episode_no=33");
+        WebtoonsRipper ripper = new WebtoonsRipper(url);
+        assertEquals("my-boo", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java
@@ -55,6 +55,7 @@ public class WordpressComicRipperTest extends RippersTest {
         WordpressComicRipper ripper = new WordpressComicRipper(
                 new URL("http://www.konradokonski.com/sawdust/comic/get-up/"));
         testRipper(ripper);
+
     }
 
     public void test_konradokonski_2() throws IOException {
@@ -63,6 +64,13 @@ public class WordpressComicRipperTest extends RippersTest {
         testRipper(ripper);
     }
 
+    public void test_konradokonski_getAlbumTitle() throws IOException {
+        URL url = new URL("http://www.konradokonski.com/sawdust/comic/get-up/");
+        WordpressComicRipper ripper = new WordpressComicRipper(url);
+        assertEquals("konradokonski.com_sawdust", ripper.getAlbumTitle(url));
+    }
+
     /*
     // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
     public void test_freeadultcomix() throws IOException {
@@ -83,6 +91,32 @@ public class WordpressComicRipperTest extends RippersTest {
                 new URL("http://tnbtu.com/comic/01-00/"));
         testRipper(ripper);
     }
+
+    public void test_Eightmuses_download() throws IOException {
+        WordpressComicRipper ripper = new WordpressComicRipper(
+                new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/"));
+        testRipper(ripper);
+    }
+
+    public void test_Eightmuses_getAlbumTitle() throws IOException {
+        URL url = new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/");
+        WordpressComicRipper ripper = new WordpressComicRipper(url);
+        assertEquals("8muses.download_lustomic-playkittens-josh-samuel-porn-comics-8-muses",
+                ripper.getAlbumTitle(url));
+    }
+
+    public void test_spyingwithlana_download() throws IOException {
+        WordpressComicRipper ripper = new WordpressComicRipper(
+                new URL("http://spyingwithlana.com/comic/the-big-hookup/"));
+        testRipper(ripper);
+    }
+
+    public void test_spyingwithlana_getAlbumTitle() throws IOException {
+        URL url = new URL("http://spyingwithlana.com/comic/the-big-hookup/");
+        WordpressComicRipper ripper = new WordpressComicRipper(url);
+        assertEquals("spyingwithlana_the-big-hookup", ripper.getAlbumTitle(url));
+    }
+
     // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
     // public void test_pepsaga() throws IOException {
     //     WordpressComicRipper ripper = new WordpressComicRipper(
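The two getAlbumTitle assertions reveal the folder-naming scheme: a host-derived prefix joined to the comic slug with an underscore, where 8muses.download keeps its full host but spyingwithlana.com drops the TLD. A hypothetical reconstruction of that mapping follows; the per-host prefixes are read straight off the expected strings in the tests, and the committed ripper may derive them differently.

// Hypothetical getAlbumTitle sketch for the two hosts asserted above.
import java.net.URL;

public class AlbumTitleSketch {

    public static String getAlbumTitle(URL url) {
        String host = url.getHost().replaceFirst("^www\\.", "");
        // Last non-empty path segment is the comic slug,
        // e.g. ".../comic/the-big-hookup/" -> "the-big-hookup"
        String[] segments = url.getPath().split("/");
        String slug = segments[segments.length - 1];
        if ("8muses.download".equals(host)) {
            return "8muses.download_" + slug;   // full host kept as prefix
        }
        if ("spyingwithlana.com".equals(host)) {
            return "spyingwithlana_" + slug;    // prefix drops the TLD here
        }
        return host + "_" + slug;               // fallback: host_slug
    }
}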
src/test/java/com/rarchives/ripme/tst/ripper/rippers/XbooruRipperTest.java
@@ -10,4 +10,10 @@ public class XbooruRipperTest extends RippersTest {
         XbooruRipper ripper = new XbooruRipper(new URL("http://xbooru.com/index.php?page=post&s=list&tags=furry"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://xbooru.com/index.php?page=post&s=list&tags=furry");
+        XbooruRipper ripper = new XbooruRipper(url);
+        assertEquals("furry", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java
@@ -16,4 +16,10 @@ public class XhamsterRipperTest extends RippersTest {
         XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664");
+        XhamsterRipper ripper = new XhamsterRipper(url);
+        assertEquals("7254664", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java
@@ -16,4 +16,10 @@ public class YuvutuRipperTest extends RippersTest {
         YuvutuRipper ripper = new YuvutuRipper(new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333");
+        YuvutuRipper ripper = new YuvutuRipper(url);
+        assertEquals("420333", ripper.getGID(url));
+    }
 }
src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java
@@ -10,4 +10,16 @@ public class ZizkiRipperTest extends RippersTest {
         ZizkiRipper ripper = new ZizkiRipper(new URL("http://zizki.com/dee-chorde/we-got-spirit"));
         testRipper(ripper);
     }
+
+    public void testGetGID() throws IOException {
+        URL url = new URL("http://zizki.com/dee-chorde/we-got-spirit");
+        ZizkiRipper ripper = new ZizkiRipper(url);
+        assertEquals("dee-chorde", ripper.getGID(url));
+    }
+
+    public void testAlbumTitle() throws IOException {
+        URL url = new URL("http://zizki.com/dee-chorde/we-got-spirit");
+        ZizkiRipper ripper = new ZizkiRipper(url);
+        assertEquals("zizki_Dee Chorde_We Got Spirit", ripper.getAlbumTitle(url));
+    }
 }