commit f9577b99ed

README.md
@@ -36,6 +36,9 @@ For information about running the `.jar` file, see [the How To Run wiki](https:/
 * Quickly downloads all images in an online album (see supported sites below)
 * Easily re-rip albums to fetch new content
+* Built in updater
+* Can rip images from tumblr in the size they were uploaded in [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#tumblrget_raw_image)
+* Skips already downloaded images by default

 ## [List of Supported Sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites)

build.sh
@@ -1 +1,2 @@
+#!/usr/bin/env bash
 mvn clean compile assembly:single
pom.xml
@@ -4,7 +4,7 @@
     <groupId>com.rarchives.ripme</groupId>
     <artifactId>ripme</artifactId>
     <packaging>jar</packaging>
-    <version>1.7.19</version>
+    <version>1.7.33</version>
     <name>ripme</name>
     <url>http://rip.rarchives.com</url>
     <properties>
ripme.json
@@ -1,6 +1,20 @@
 {
-    "latestVersion": "1.7.19",
+    "latestVersion": "1.7.33",
     "changeList": [
+        "1.7.33: Instagram ripper no longer errors out when downloading from more than 1 page",
+        "1.7.32: Instagram ripper update to use new endpoints",
+        "1.7.31: Instagram ripper no longer errors out when getting next page",
+        "1.7.30: Fixed usage of command-line on non-headless systems",
+        "1.7.29: Can now download single images from imgur; Improved handling of headless mode & OS-specific config; Added modelx ripper; Fixed eroshae ripper",
+        "1.7.28: IG ripper now uses display_url when downloading images; Reddit ripper now gets erome links; Hentaifoundry Ripper no longer errors out when there is no next page",
+        "1.7.27: IG ripper can now rip from tags; fixed json parsing issues",
+        "1.7.26: fixed instagram ripper",
+        "1.7.25: Fixed instagram ripper; Added an option to use short names for 8muses; Added tsuminoRipper; Added support for incase.buttsmithy.com",
+        "1.7.24: Added sta.sh ripper; Added sinfest.com ripper; Added femjoyhunter.com ripper; Disabled flaky unit tests",
+        "1.7.23: Fixed xvideos ripper; InstagramRipper now works with last seen feature",
+        "1.7.22: Added func to normalize urls before reading from/writing to url history file; last seen feature now works with instagram",
+        "1.7.21: Fixed last seen feature",
+        "1.7.20: Fixed 8muses ripper; Added last seen feature; disabled 500px test",
         "1.7.19: imgurRipper no longer tries to add ?1 to file names",
         "1.7.18: AlbumRipper now removes bad chars from file names",
         "1.7.17: Fixed hentai.cafe autorip from clipboard",
App.java
@@ -1,5 +1,6 @@
 package com.rarchives.ripme;

+import java.awt.*;
 import java.io.File;
 import java.io.IOException;
 import java.io.BufferedReader;
@@ -18,6 +19,7 @@ import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.commons.lang.SystemUtils;
 import org.apache.log4j.Logger;

 import com.rarchives.ripme.ripper.AbstractRipper;
@@ -34,35 +36,34 @@ import com.rarchives.ripme.utils.Utils;
  */
 public class App {

-    public static final Logger logger;
+    public static final Logger logger = Logger.getLogger(App.class);
     private static final History HISTORY = new History();

-    static {
-        //initialize logger
-        Utils.configureLogger();
-        logger = Logger.getLogger(App.class);
-    }

     public static void main(String[] args) throws MalformedURLException {
         CommandLine cl = getArgs(args);

         if (args.length > 0 && cl.hasOption('v')){
-            logger.error(UpdateUtils.getThisJarVersion());
+            logger.info(UpdateUtils.getThisJarVersion());
             System.exit(0);
         }

-        System.setProperty("apple.laf.useScreenMenuBar", "true");
-        System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
-        logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+        if (GraphicsEnvironment.isHeadless() || args.length > 0) {

-        if (args.length > 0) {
-            // CLI Mode
             handleArguments(args);
         } else {
-            // GUI Mode
+            if (SystemUtils.IS_OS_MAC_OSX) {
+                System.setProperty("apple.laf.useScreenMenuBar", "true");
+                System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
+            }

+            Utils.configureLogger();

+            logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());

             MainWindow mw = new MainWindow();
             SwingUtilities.invokeLater(mw);
         }
     }

     /**
      * Creates an abstract ripper and instructs it to rip.
      * @param url URL to be ripped
@@ -80,20 +81,28 @@ public class App {
      */
     private static void handleArguments(String[] args) {
         CommandLine cl = getArgs(args);
-        if (cl.hasOption('h')) {
+        if (cl.hasOption('h') || args.length == 0) {
             HelpFormatter hf = new HelpFormatter();
             hf.printHelp("java -jar ripme.jar [OPTIONS]", getOptions());
             System.exit(0);
         }

+        Utils.configureLogger();
+        logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());

         if (cl.hasOption('w')) {
             Utils.setConfigBoolean("file.overwrite", true);
         }

         if (cl.hasOption('t')) {
             Utils.setConfigInteger("threads.size", Integer.parseInt(cl.getOptionValue('t')));
         }

         if (cl.hasOption('4')) {
             Utils.setConfigBoolean("errors.skip404", true);
         }

         if (cl.hasOption('r')) {
             // Re-rip all via command-line
             List<String> history = Utils.getConfigList("download.history");
@@ -115,6 +124,7 @@ public class App {
             // Exit
             System.exit(0);
         }

         if (cl.hasOption('R')) {
             loadHistory();
             if (HISTORY.toList().isEmpty()) {
@@ -146,20 +156,25 @@ public class App {
                 System.exit(-1);
             }
         }

         if (cl.hasOption('d')) {
             Utils.setConfigBoolean("download.save_order", true);
         }

         if (cl.hasOption('D')) {
             Utils.setConfigBoolean("download.save_order", false);
         }

         if ((cl.hasOption('d'))&&(cl.hasOption('D'))) {
             logger.error("\nCannot specify '-d' and '-D' simultaneously");
             System.exit(-1);
         }

         if (cl.hasOption('l')) {
             // change the default rips directory
             Utils.setConfigString("rips.directory", cl.getOptionValue('l'));
         }

         if (cl.hasOption('f')) {
             String filename = cl.getOptionValue('f');
             try {
@@ -175,6 +190,7 @@ public class App {
                 logger.error("[!] Failed reading file containing list of URLs. Cannot continue.");
             }
         }

         if (cl.hasOption('u')) {
             String url = cl.getOptionValue('u').trim();
             ripURL(url, cl.hasOption("n"));
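The reworked `main` above dispatches on `GraphicsEnvironment.isHeadless() || args.length > 0` and only applies the Apple Swing properties when actually running on OS X. A minimal standalone sketch of that dispatch, assuming nothing from RipMe: `runCli` and `runGui` are hypothetical stand-ins for `handleArguments` and the `MainWindow` launch, and the `os.name` check stands in for `SystemUtils.IS_OS_MAC_OSX` to avoid the commons-lang dependency.

```java
import java.awt.GraphicsEnvironment;

public class DispatchSketch {
    public static void main(String[] args) {
        // CLI mode: no display available, or the user passed arguments.
        if (GraphicsEnvironment.isHeadless() || args.length > 0) {
            runCli(args); // stand-in for App.handleArguments(args)
        } else {
            // GUI mode: apply the macOS-specific Swing properties first.
            if (System.getProperty("os.name").toLowerCase().contains("mac")) {
                System.setProperty("apple.laf.useScreenMenuBar", "true");
                System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
            }
            runGui(); // stand-in for SwingUtilities.invokeLater(new MainWindow())
        }
    }

    private static void runCli(String[] args) {
        System.out.println("CLI mode with " + args.length + " argument(s)");
    }

    private static void runGui() {
        System.out.println("GUI mode");
    }
}
```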
AbstractHTMLRipper.java
@@ -68,7 +68,13 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
         Document doc = getFirstPage();

         while (doc != null) {
+            if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
+                sendUpdate(STATUS.DOWNLOAD_COMPLETE, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
+                break;
+            }
             List<String> imageURLs = getURLsFromPage(doc);
+            // If hasASAPRipping() returns true then the ripper will handle downloading the files
+            // if not it's done in the following block of code
             if (!hasASAPRipping()) {
                 // Remove all but 1 image
                 if (isThisATest()) {
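The new guard ends a rip once the skip counter crosses the `history.end_rip_after_already_seen` threshold, which is what makes re-rips of mostly-unchanged albums cheap. A self-contained sketch of how the counter and threshold interact; the hard-coded threshold and the page list are stand-ins for `Utils.getConfigInteger` and the real page fetch.

```java
import java.util.Arrays;
import java.util.List;

public class EndRipSketch {
    // Mirrors AbstractRipper.alreadyDownloadedUrls: bumped each time a URL is skipped.
    static int alreadyDownloadedUrls = 0;
    // Stand-in for Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000).
    static final int THRESHOLD = 3;

    public static void main(String[] args) {
        List<String> pages = Arrays.asList("page1", "page2", "page3", "page4");
        for (String page : pages) {
            if (alreadyDownloadedUrls >= THRESHOLD) {
                System.out.println("Already seen the last " + alreadyDownloadedUrls + " images, ending rip");
                break;
            }
            // Pretend both URLs on this page were downloaded in a previous rip.
            alreadyDownloadedUrls += 2;
            System.out.println("Processed " + page);
        }
    }
}
```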
AbstractRipper.java
@@ -44,7 +44,8 @@ public abstract class AbstractRipper
     public abstract String getHost();
     public abstract String getGID(URL url) throws MalformedURLException;
     public boolean hasASAPRipping() { return false; }
+    // Every time addUrlToDownload skips an already downloaded url this increases by 1
+    public int alreadyDownloadedUrls = 0;
     private boolean shouldStop = false;
     private boolean thisIsATest = false;

@@ -60,7 +61,13 @@ public abstract class AbstractRipper
         }
     }

+    /**
+     * Adds a URL to the url history file
+     * @param downloadedURL URL to add to the history file
+     */
     private void writeDownloadedURL(String downloadedURL) throws IOException {
+        downloadedURL = normalizeUrl(downloadedURL);
         BufferedWriter bw = null;
         FileWriter fw = null;
         try {
@@ -85,6 +92,15 @@ public abstract class AbstractRipper
             }
         }
     }

+    /**
+     * Normalize a URL
+     * @param url URL to normalize
+     */
+    public String normalizeUrl(String url) {
+        return url;
+    }

     /**
      * Checks to see if Ripme has already downloaded a URL
@@ -95,6 +111,7 @@ public abstract class AbstractRipper
      */
     private boolean hasDownloadedURL(String url) {
         File file = new File(URLHistoryFile);
+        url = normalizeUrl(url);
         try {
             Scanner scanner = new Scanner(file);
             while (scanner.hasNextLine()) {
@@ -157,7 +174,7 @@ public abstract class AbstractRipper
      *      URL of the file
      * @param saveAs
      *      Path of the local file to save the content to.
-     * @return True on success, flase on failure.
+     * @return True on success, false on failure.
      */
     public abstract boolean addURLToDownload(URL url, File saveAs);

@@ -189,14 +206,18 @@ public abstract class AbstractRipper
      *      The HTTP referrer to use while downloading this file.
      * @param cookies
      *      The cookies to send to the server while downloading this file.
+     * @param fileName
+     *      The name that the file will be written to
      * @return
      *      True if downloaded successfully
      *      False if failed to download
      */
-    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies) {
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName) {
+        // Don't re-add the url if it was downloaded in a previous rip
         if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
             if (hasDownloadedURL(url.toExternalForm())) {
                 sendUpdate(STATUS.DOWNLOAD_WARN, "Already downloaded " + url.toExternalForm());
+                alreadyDownloadedUrls += 1;
                 return false;
             }
         }
@@ -206,9 +227,18 @@ public abstract class AbstractRipper
             logger.debug("Ripper has been stopped");
             return false;
         }
-        logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies);
-        String saveAs = url.toExternalForm();
-        saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+        logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
+        String saveAs;
+        if (fileName != null) {
+            saveAs = fileName;
+            // Get the extension of the file
+            String extension = url.toExternalForm().substring(url.toExternalForm().lastIndexOf(".") + 1);
+            saveAs = saveAs + "." + extension;
+        } else {
+            saveAs = url.toExternalForm();
+            saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+        }

         if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
         if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
         if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
@@ -255,7 +285,11 @@ public abstract class AbstractRipper
      * @return True on success, flase on failure.
      */
     protected boolean addURLToDownload(URL url, String prefix, String subdirectory) {
-        return addURLToDownload(url, prefix, subdirectory, null, null);
+        return addURLToDownload(url, prefix, subdirectory, null, null, null);
+    }
+
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies) {
+        return addURLToDownload(url, prefix, subdirectory, referrer, cookies, null);
     }

     /**
@@ -271,6 +305,8 @@ public abstract class AbstractRipper
         // Use empty subdirectory
         return addURLToDownload(url, prefix, "");
     }

     /**
      * Waits for downloading threads to complete.
      */
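The new `fileName` parameter changes how the saved name is derived: an explicit name wins and inherits the URL's extension, otherwise the last path segment is used, and query/fragment markers are trimmed either way. A sketch of that resolution as a pure function, assuming only what the diff shows; the URLs in `main` are made up for illustration.

```java
public class SaveAsSketch {
    /**
     * Mirrors the new naming logic in addURLToDownload: an explicit fileName
     * wins and inherits the URL's extension; otherwise the last path segment
     * of the URL is used. Query/fragment markers are trimmed in both cases.
     */
    static String resolveSaveAs(String url, String fileName) {
        String saveAs;
        if (fileName != null) {
            saveAs = fileName + url.substring(url.lastIndexOf('.')); // keep the extension
        } else {
            saveAs = url.substring(url.lastIndexOf('/') + 1);
        }
        for (char c : new char[] {'?', '#', '&'}) {
            int idx = saveAs.indexOf(c);
            if (idx >= 0) { saveAs = saveAs.substring(0, idx); }
        }
        return saveAs;
    }

    public static void main(String[] args) {
        System.out.println(resolveSaveAs("http://example.com/a/photo.jpg?1", null));      // photo.jpg
        System.out.println(resolveSaveAs("http://example.com/a/photo.jpg", "001_cover")); // 001_cover.jpg
    }
}
```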
EightmusesRipper.java
@@ -11,6 +11,7 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

+import com.rarchives.ripme.utils.Utils;
 import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
@@ -51,7 +52,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {

     @Override
     public String getGID(URL url) throws MalformedURLException {
-        Pattern p = Pattern.compile("^https?://(www\\.)?8muses\\.com/comix/album/([a-zA-Z0-9\\-_]+).*$");
+        Pattern p = Pattern.compile("^https?://(www\\.)?8muses\\.com/(comix|comics)/album/([a-zA-Z0-9\\-_]+).*$");
         Matcher m = p.matcher(url.toExternalForm());
         if (!m.matches()) {
             throw new MalformedURLException("Expected URL format: http://www.8muses.com/index/category/albumname, got: " + url);
@@ -93,7 +94,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
         Elements pageImages = page.getElementsByClass("c-tile");
         for (Element thumb : pageImages) {
             // If true this link is a sub album
-            if (thumb.attr("href").contains("/comix/album/")) {
+            if (thumb.attr("href").contains("/comics/album/")) {
                 String subUrl = "https://www.8muses.com" + thumb.attr("href");
                 try {
                     logger.info("Retrieving " + subUrl);
@@ -106,7 +107,8 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                     logger.warn("Error while loading subalbum " + subUrl, e);
                 }

-            } else if (thumb.attr("href").contains("/comix/picture/")) {
+            } else if (thumb.attr("href").contains("/comics/picture/")) {
+                logger.info("This page is an album");
                 logger.info("Ripping image");
                 if (super.isStopped()) break;
                 // Find thumbnail image source
@@ -124,7 +126,11 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                 logger.info("Retrieving full-size image location from " + imageHref);
                 image = getFullSizeImage(imageHref);
                 URL imageUrl = new URL(image);
-                addURLToDownload(imageUrl, getPrefix(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies);
+                if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
+                    addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "");
+                } else {
+                    addURLToDownload(imageUrl, getPrefixLong(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies);
+                }
                 // X is our page index
                 x++;

@@ -177,8 +183,11 @@ public class EightmusesRipper extends AbstractHTMLRipper {
         addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), cookies);
     }

-    @Override
-    public String getPrefix(int index) {
+    public String getPrefixLong(int index) {
         return String.format("%03d_", index);
     }

+    public String getPrefixShort(int index) {
+        return String.format("%03d", index);
+    }
 }
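With `8muses.use_short_names` enabled, the ripper switches from the underscored `getPrefixLong` to `getPrefixShort` and passes an empty `fileName`, so the saved name reduces to the index plus the extension. A small demonstration of the two formats; the concatenation shown below is an assumption about how the downloader joins prefix and name.

```java
public class PrefixSketch {
    // Long form keeps the original page name after the index: "001_pagename.jpg".
    static String getPrefixLong(int index) {
        return String.format("%03d_", index);
    }

    // Short form is the bare index; combined with an empty fileName the file
    // ends up as just "001.jpg".
    static String getPrefixShort(int index) {
        return String.format("%03d", index);
    }

    public static void main(String[] args) {
        System.out.println(getPrefixLong(1) + "pagename.jpg"); // 001_pagename.jpg
        System.out.println(getPrefixShort(1) + ".jpg");        // 001.jpg
    }
}
```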
EroShareRipper.java
@@ -119,7 +119,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
         for (Element img : imgs) {
             if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
-                imageURL = "https:" + imageURL;
+                imageURL = imageURL;
                 URLs.add(imageURL);
             }
         }
@@ -129,7 +129,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
             if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
-                URLs.add("https:" + videoURL);
+                URLs.add(videoURL);
             }
         }
         // Profile videos
@@ -148,7 +148,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
             if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
-                URLs.add("https:" + videoURL);
+                URLs.add(videoURL);
             }
         }
     }
@@ -209,7 +209,6 @@ public class EroShareRipper extends AbstractHTMLRipper {
         for (Element img : imgs) {
             if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
-                imageURL = "https:" + imageURL;
                 URLs.add(new URL(imageURL));
             }
         }
@@ -219,7 +218,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
             if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
-                URLs.add(new URL("https:" + videoURL));
+                URLs.add(new URL(videoURL));
             }
         }

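The eroshae fix drops the unconditional `"https:" + src` prefix because the site now emits absolute `src` values; prepending a scheme to an already-absolute URL produces the invalid `https:https://...`. A defensive variant (a hypothetical helper, not part of this commit) would prepend only for protocol-relative values:

```java
public class UrlSchemeSketch {
    // Hypothetical helper: add a scheme only when the attribute value is
    // protocol-relative ("//host/path"); absolute URLs pass through unchanged.
    static String withScheme(String src) {
        return src.startsWith("//") ? "https:" + src : src;
    }

    public static void main(String[] args) {
        System.out.println(withScheme("//cdn.example.com/v.mp4"));       // https://cdn.example.com/v.mp4
        System.out.println(withScheme("https://cdn.example.com/v.mp4")); // unchanged
    }
}
```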
FemjoyhunterRipper.java (new file)
@@ -0,0 +1,64 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class FemjoyhunterRipper extends AbstractHTMLRipper {
+
+    public FemjoyhunterRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "femjoyhunter";
+    }
+
+    @Override
+    public String getDomain() {
+        return "femjoyhunter.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://www.femjoyhunter.com/([a-zA-Z0-9_-]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected femjoyhunter URL format: " +
+                "femjoyhunter.com/ID - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("ul.gallery-b > li > a")) {
+            result.add(el.attr("href"));
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", "https://a2h6m3w6.ssl.hwcdn.net/", null);
+    }
+}
ImgurRipper.java
@@ -36,6 +36,7 @@ public class ImgurRipper extends AlbumRipper {
         USER,
         USER_ALBUM,
         USER_IMAGES,
+        SINGLE_IMAGE,
         SERIES_OF_IMAGES,
         SUBREDDIT
     }
@@ -155,34 +156,48 @@ public class ImgurRipper extends AlbumRipper {
     @Override
     public void rip() throws IOException {
         switch (albumType) {
             case ALBUM:
                 // Fall-through
             case USER_ALBUM:
                 logger.info("Album type is USER_ALBUM");
                 // Don't call getAlbumTitle(this.url) with this
                 // as it seems to cause the album to be downloaded to a subdir.
                 ripAlbum(this.url);
                 break;
             case SERIES_OF_IMAGES:
                 logger.info("Album type is SERIES_OF_IMAGES");
                 ripAlbum(this.url);
                 break;
-            case USER:
-                logger.info("Album type is USER");
-                ripUserAccount(url);
+            case SINGLE_IMAGE:
+                logger.info("Album type is SINGLE_IMAGE");
+                ripSingleImage(this.url);
                 break;
-            case SUBREDDIT:
-                logger.info("Album type is SUBREDDIT");
-                ripSubreddit(url);
+            case USER:
+                logger.info("Album type is USER");
+                ripUserAccount(url);
                 break;
-            case USER_IMAGES:
-                logger.info("Album type is USER_IMAGES");
-                ripUserImages(url);
+            case SUBREDDIT:
+                logger.info("Album type is SUBREDDIT");
+                ripSubreddit(url);
                 break;
+            case USER_IMAGES:
+                logger.info("Album type is USER_IMAGES");
+                ripUserImages(url);
+                break;
         }
         waitForThreads();
     }

+    private void ripSingleImage(URL url) throws IOException {
+        String strUrl = url.toExternalForm();
+        Document document = getDocument(strUrl);
+        Matcher m = getEmbeddedJsonMatcher(document);
+        if (m.matches()) {
+            JSONObject json = new JSONObject(m.group(1)).getJSONObject("image");
+            addURLToDownload(extractImageUrlFromJson(json), "");
+        }
+    }
+
     private void ripAlbum(URL url) throws IOException {
         ripAlbum(url, "");
     }
@@ -257,38 +272,16 @@ public class ImgurRipper extends AlbumRipper {
             strUrl += "/all";
         }
         logger.info(" Retrieving " + strUrl);
-        Document doc = Jsoup.connect(strUrl)
-                            .userAgent(USER_AGENT)
-                            .timeout(10 * 1000)
-                            .maxBodySize(0)
-                            .get();
+        Document doc = getDocument(strUrl);

         // Try to use embedded JSON to retrieve images
-        Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL);
-        Matcher m = p.matcher(doc.body().html());
+        Matcher m = getEmbeddedJsonMatcher(doc);
         if (m.matches()) {
             try {
-                ImgurAlbum imgurAlbum = new ImgurAlbum(url);
                 JSONObject json = new JSONObject(m.group(1));
-                JSONArray images = json.getJSONObject("image")
+                JSONArray jsonImages = json.getJSONObject("image")
                         .getJSONObject("album_images")
                         .getJSONArray("images");
-                int imagesLength = images.length();
-                for (int i = 0; i < imagesLength; i++) {
-                    JSONObject image = images.getJSONObject(i);
-                    String ext = image.getString("ext");
-                    if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
-                        ext = ".mp4";
-                    }
-                    URL imageURL = new URL(
-                            "http://i.imgur.com/"
-                            + image.getString("hash")
-                            + ext);
-                    ImgurImage imgurImage = new ImgurImage(imageURL);
-                    imgurImage.extension = ext;
-                    imgurAlbum.addImage(imgurImage);
-                }
-                return imgurAlbum;
+                return createImgurAlbumFromJsonArray(url, jsonImages);
             } catch (JSONException e) {
                 logger.debug("Error while parsing JSON at " + url + ", continuing", e);
             }
@@ -330,6 +323,44 @@ public class ImgurRipper extends AlbumRipper {
         return imgurAlbum;
     }

+    private static Matcher getEmbeddedJsonMatcher(Document doc) {
+        Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL);
+        return p.matcher(doc.body().html());
+    }
+
+    private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException {
+        ImgurAlbum imgurAlbum = new ImgurAlbum(url);
+        int imagesLength = jsonImages.length();
+        for (int i = 0; i < imagesLength; i++) {
+            JSONObject jsonImage = jsonImages.getJSONObject(i);
+            imgurAlbum.addImage(createImgurImageFromJson(jsonImage));
+        }
+        return imgurAlbum;
+    }
+
+    private static ImgurImage createImgurImageFromJson(JSONObject json) throws MalformedURLException {
+        return new ImgurImage(extractImageUrlFromJson(json));
+    }
+
+    private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException {
+        String ext = json.getString("ext");
+        if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
+            ext = ".mp4";
+        }
+        return new URL(
+                "http://i.imgur.com/"
+                + json.getString("hash")
+                + ext);
+    }
+
+    private static Document getDocument(String strUrl) throws IOException {
+        return Jsoup.connect(strUrl)
+                    .userAgent(USER_AGENT)
+                    .timeout(10 * 1000)
+                    .maxBodySize(0)
+                    .get();
+    }
+
     /**
      * Rips all albums in an imgur user's account.
      * @param url
@@ -507,6 +538,13 @@ public class ImgurRipper extends AlbumRipper {
             this.url = new URL("http://imgur.com/r/" + subreddit + "/" + gid);
             return "r_" + subreddit + "_" + gid;
         }
+        p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9]{5,})$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Single imgur image
+            albumType = ALBUM_TYPE.SINGLE_IMAGE;
+            return m.group(m.groupCount());
+        }
         p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9,]{5,}).*$");
         m = p.matcher(url.toExternalForm());
         if (m.matches()) {
InstagramRipper.java
@@ -1,14 +1,19 @@
 package com.rarchives.ripme.ripper.rippers;

+import java.io.BufferedReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.UnsupportedEncodingException;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.net.URLConnection;
 import java.time.*;
 import java.time.format.DateTimeFormatter;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.security.*;

 import org.json.JSONArray;
 import org.json.JSONException;
@@ -17,15 +22,25 @@ import org.json.JSONObject;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;

+import org.jsoup.Connection;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ui.RipStatusMessage;
 import com.rarchives.ripme.utils.Utils;
+import java.util.HashMap;

 public class InstagramRipper extends AbstractHTMLRipper {
+    String nextPageID = "";
+    private String qHash;
+    private boolean rippingTag = false;
+    private String tagName;

     private String userID;
+    private String rhx_gis = null;
+    private String csrftoken;

     public InstagramRipper(URL url) throws IOException {
         super(url);
@@ -52,6 +67,12 @@ public class InstagramRipper extends AbstractHTMLRipper {
         return san_url;
     }

+    @Override
+    public String normalizeUrl(String url) {
+        // Remove the date sig from the url
+        return url.replaceAll("/[A-Z0-9]{8}/", "/");
+    }
+
     private List<String> getPostsFromSinglePage(Document Doc) {
         List<String> imageURLs = new ArrayList<>();
         JSONArray datas;
@@ -123,13 +144,31 @@ public class InstagramRipper extends AbstractHTMLRipper {
         p = Pattern.compile("^https?://www.instagram.com/explore/tags/([^/]+)/?");
         m = p.matcher(url.toExternalForm());
         if (m.matches()) {
+            rippingTag = true;
+            tagName = m.group(1);
             return m.group(1);
         }

         throw new MalformedURLException("Unable to find user in " + url);
     }

+    private String stripHTMLTags(String t) {
+        t = t.replaceAll("<html>\n" +
+                " <head></head>\n" +
+                " <body>", "");
+        t.replaceAll("</body>\n" +
+                "</html>", "");
+        t = t.replaceAll("\n", "");
+        t = t.replaceAll("=\"\"", "");
+        return t;
+    }
+
     private JSONObject getJSONFromPage(Document firstPage) throws IOException {
+        // Check if this page is HTML + JSON or just json
+        if (!firstPage.html().contains("window._sharedData =")) {
+            return new JSONObject(stripHTMLTags(firstPage.html()));
+        }
         String jsonText = "";
         try {
             for (Element script : firstPage.select("script[type=text/javascript]")) {
@@ -146,8 +185,13 @@ public class InstagramRipper extends AbstractHTMLRipper {

     @Override
     public Document getFirstPage() throws IOException {
-        userID = getGID(url);
-        return Http.url(url).get();
+        Connection.Response resp = Http.url(url).response();
+        logger.info(resp.cookies());
+        csrftoken = resp.cookie("csrftoken");
+        Document p = resp.parse();
+        // Get the query hash so we can download the next page
+        qHash = getQHash(p);
+        return p;
     }

     private String getVideoFromPage(String videoID) {
@@ -192,7 +236,6 @@ public class InstagramRipper extends AbstractHTMLRipper {

     @Override
     public List<String> getURLsFromPage(Document doc) {
-        String nextPageID = "";
         List<String> imageURLs = new ArrayList<>();
         JSONObject json = new JSONObject();
         try {
@@ -201,33 +244,53 @@ public class InstagramRipper extends AbstractHTMLRipper {
             logger.warn("Unable to exact json from page");
         }

+        // get the rhx_gis value so we can get the next page later on
+        if (rhx_gis == null) {
+            rhx_gis = json.getString("rhx_gis");
+        }
         if (!url.toExternalForm().contains("/p/")) {
             JSONArray datas = new JSONArray();
-            try {
-                JSONArray profilePage = json.getJSONObject("entry_data").getJSONArray("ProfilePage");
-                datas = profilePage.getJSONObject(0).getJSONObject("user").getJSONObject("media").getJSONArray("nodes");
-            } catch (JSONException e) {
-                // Handle hashtag pages
-                datas = json.getJSONObject("entry_data").getJSONArray("TagPage").getJSONObject(0)
-                        .getJSONObject("tag").getJSONObject("media").getJSONArray("nodes");
+            if (!rippingTag) {
+                // This first try only works on data from the first page
+                try {
+                    JSONArray profilePage = json.getJSONObject("entry_data").getJSONArray("ProfilePage");
+                    userID = profilePage.getJSONObject(0).getString("logging_page_id").replaceAll("profilePage_", "");
+                    datas = profilePage.getJSONObject(0).getJSONObject("graphql").getJSONObject("user")
+                            .getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges");
+                } catch (JSONException e) {
+                    datas = json.getJSONObject("data").getJSONObject("user")
+                            .getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges");
+                }
+            } else {
+                try {
+                    JSONArray tagPage = json.getJSONObject("entry_data").getJSONArray("TagPage");
+                    datas = tagPage.getJSONObject(0).getJSONObject("graphql").getJSONObject("hashtag")
+                            .getJSONObject("edge_hashtag_to_media").getJSONArray("edges");
+                } catch (JSONException e) {
+                    datas = json.getJSONObject("data").getJSONObject("hashtag").getJSONObject("edge_hashtag_to_media")
+                            .getJSONArray("edges");
+                }
             }
             for (int i = 0; i < datas.length(); i++) {
                 JSONObject data = (JSONObject) datas.get(i);
-                Long epoch = data.getLong("date");
+                data = data.getJSONObject("node");
+                Long epoch = data.getLong("taken_at_timestamp");
                 Instant instant = Instant.ofEpochSecond(epoch);
                 String image_date = DateTimeFormatter.ofPattern("yyyy_MM_dd_hh:mm_").format(ZonedDateTime.ofInstant(instant, ZoneOffset.UTC));
-                if (data.getString("__typename").equals("GraphSidecar")) {
-                    try {
-                        Document slideShowDoc = Http.url(new URL ("https://www.instagram.com/p/" + data.getString("code"))).get();
-                        List<String> toAdd = getPostsFromSinglePage(slideShowDoc);
-                        for (int slideShowInt=0; slideShowInt<toAdd.size(); slideShowInt++) {
-                            addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("code"));
+                // It looks like tag pages don't have the __typename key
+                if (!rippingTag) {
+                    if (data.getString("__typename").equals("GraphSidecar")) {
+                        try {
+                            Document slideShowDoc = Http.url(new URL("https://www.instagram.com/p/" + data.getString("shortcode"))).get();
+                            List<String> toAdd = getPostsFromSinglePage(slideShowDoc);
+                            for (int slideShowInt = 0; slideShowInt < toAdd.size(); slideShowInt++) {
+                                addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode"));
+                            }
+                        } catch (MalformedURLException e) {
+                            logger.error("Unable to download slide show, URL was malformed");
+                        } catch (IOException e) {
+                            logger.error("Unable to download slide show");
                         }
-                    } catch (MalformedURLException e) {
-                        logger.error("Unable to download slide show, URL was malformed");
-                    } catch (IOException e) {
-                        logger.error("Unable to download slide show");
                     }
                 }
                 try {
@@ -235,14 +298,14 @@ public class InstagramRipper extends AbstractHTMLRipper {
                     if (imageURLs.size() == 0) {
                         // We add this one item to the array because either wise
                         // the ripper will error out because we returned an empty array
-                        imageURLs.add(getOriginalUrl(data.getString("thumbnail_src")));
+                        imageURLs.add(getOriginalUrl(data.getString("display_url")));
                     }
-                    addURLToDownload(new URL(getOriginalUrl(data.getString("thumbnail_src"))), image_date);
+                    addURLToDownload(new URL(data.getString("display_url")), image_date);
                 } else {
                     if (!Utils.getConfigBoolean("instagram.download_images_only", false)) {
-                        addURLToDownload(new URL(getVideoFromPage(data.getString("code"))), image_date);
+                        addURLToDownload(new URL(getVideoFromPage(data.getString("shortcode"))), image_date);
                     } else {
-                        sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping video " + data.getString("code"));
+                        sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping video " + data.getString("shortcode"));
                     }
                 }
             } catch (MalformedURLException e) {
@@ -255,33 +318,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
                     break;
                 }
             }
-            // Rip the next page
-            if (!nextPageID.equals("") && !isThisATest()) {
-                if (url.toExternalForm().contains("/tags/")) {
-                    try {
-                        // Sleep for a while to avoid a ban
-                        sleep(2500);
-                        if (url.toExternalForm().substring(url.toExternalForm().length() - 1).equals("/")) {
-                            getURLsFromPage(Http.url(url.toExternalForm() + "?max_id=" + nextPageID).get());
-                        } else {
-                            getURLsFromPage(Http.url(url.toExternalForm() + "/?max_id=" + nextPageID).get());
-                        }
-
-                    } catch (IOException e) {
-                        return imageURLs;
-                    }
-                }
-                try {
-                    // Sleep for a while to avoid a ban
-                    sleep(2500);
-                    getURLsFromPage(Http.url("https://www.instagram.com/" + userID + "/?max_id=" + nextPageID).get());
-                } catch (IOException e) {
-                    return imageURLs;
-                }
-            } else {
-                logger.warn("Can't get net page");
-            }
         } else { // We're ripping from a single page
             logger.info("Ripping from single page");
             imageURLs = getPostsFromSinglePage(doc);
@@ -290,9 +327,124 @@ public class InstagramRipper extends AbstractHTMLRipper {
         return imageURLs;
     }

+    private String getIGGis(String variables) {
+        String stringToMD5 = rhx_gis + ":" + csrftoken + ":" + variables;
+        logger.debug("String to md5 is \"" + stringToMD5 + "\"");
+        try {
+            byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
+
+            MessageDigest md = MessageDigest.getInstance("MD5");
+            byte[] hash = md.digest(bytesOfMessage);
+            StringBuffer sb = new StringBuffer();
+            for (int i = 0; i < hash.length; ++i) {
+                sb.append(Integer.toHexString((hash[i] & 0xFF) | 0x100).substring(1,3));
+            }
+            return sb.toString();
+        } catch(UnsupportedEncodingException e) {
+            return null;
+        } catch(NoSuchAlgorithmException e) {
+            return null;
+        }
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        Document toreturn;
+        java.util.Map<String, String> cookies = new HashMap<String, String>();
+        // This shouldn't be hardcoded and will break one day
+        cookies.put("ig_pr", "1");
+        cookies.put("csrftoken", csrftoken);
+        if (!nextPageID.equals("") && !isThisATest()) {
+            if (rippingTag) {
+                try {
+                    sleep(2500);
+                    String vars = "{\"tag_name\":\"" + tagName + "\",\"first\":4,\"after\":\"" + nextPageID + "\"}";
+                    String ig_gis = getIGGis(vars);
+                    toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
+                            "&variables=" + vars).header("x-instagram-gis", ig_gis).cookies(cookies).ignoreContentType().get();
+                    // Sleep for a while to avoid a ban
+                    logger.info(toreturn.html());
+                    return toreturn;
+
+                } catch (IOException e) {
+                    throw new IOException("No more pages");
+                }
+
+            }
+            try {
+                // Sleep for a while to avoid a ban
+                sleep(2500);
+                String vars = "{\"id\":\"" + userID + "\",\"first\":100,\"after\":\"" + nextPageID + "\"}";
+                String ig_gis = getIGGis(vars);
+                logger.info(ig_gis);
+                toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars
+                        ).header("x-instagram-gis", ig_gis).cookies(cookies).ignoreContentType().get();
+                if (!pageHasImages(toreturn)) {
+                    throw new IOException("No more pages");
+                }
+                return toreturn;
+            } catch (IOException e) {
+                return null;
+            }
+        } else {
+            throw new IOException("No more pages");
+        }
+    }
+
     @Override
     public void downloadURL(URL url, int index) {
         addURLToDownload(url);
     }

+    private boolean pageHasImages(Document doc) {
+        JSONObject json = new JSONObject(stripHTMLTags(doc.html()));
+        int numberOfImages = json.getJSONObject("data").getJSONObject("user")
+                .getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges").length();
+        if (numberOfImages == 0) {
+            return false;
+        }
+        return true;
+    }
+
+    private String getQHash(Document doc) {
+        String jsFileURL = "https://www.instagram.com" + doc.select("link[rel=preload]").attr("href");
+        StringBuilder sb = new StringBuilder();
+        Document jsPage;
+        try {
+            // We can't use Jsoup here because it won't download a non-html file larger than a MB
+            // even if you set maxBodySize to 0
+            URLConnection connection = new URL(jsFileURL).openConnection();
+            BufferedReader in = new BufferedReader(new InputStreamReader(connection.getInputStream()));
+            String line;
+            while ((line = in.readLine()) != null) {
+                sb.append(line);
+            }
+            in.close();
+
+        } catch (MalformedURLException e) {
+            logger.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
+            return null;
+        } catch (IOException e) {
+            logger.info("Unable to get query_hash");
+            logger.info(e.getMessage());
+            return null;
+        }
+        if (!rippingTag) {
+            Pattern jsP = Pattern.compile("o},queryId:.([a-zA-Z0-9]+).");
+            Matcher m = jsP.matcher(sb.toString());
+            if (m.find()) {
+                return m.group(1);
+            }
+        } else {
+            Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
+            Matcher m = jsP.matcher(sb.toString());
+            if (m.find()) {
+                return m.group(1);
+            }
+        }
+        logger.info("Could not find query_hash on " + jsFileURL);
+        return null;
+    }

 }
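`getIGGis` signs each GraphQL pagination request by MD5-hashing `rhx_gis + ":" + csrftoken + ":" + variables` and sending the lower-case hex digest as the `x-instagram-gis` header. A standalone sketch of the same computation, using `String.format` for the hex step instead of the ripper's manual `Integer.toHexString` loop; the token values below are placeholders, not real credentials.

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class IgGisSketch {
    static String igGis(String rhxGis, String csrfToken, String variables) throws NoSuchAlgorithmException {
        // Same input format the ripper hashes: rhx_gis:csrftoken:variables
        String toHash = rhxGis + ":" + csrfToken + ":" + variables;
        MessageDigest md = MessageDigest.getInstance("MD5");
        StringBuilder sb = new StringBuilder();
        for (byte b : md.digest(toHash.getBytes(StandardCharsets.UTF_8))) {
            sb.append(String.format("%02x", b)); // lower-case hex digest
        }
        return sb.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // Placeholder values; the real ones come from the page JSON and the csrftoken cookie.
        String vars = "{\"id\":\"12345\",\"first\":100,\"after\":\"abc\"}";
        System.out.println(igGis("example_rhx_gis", "example_csrf", vars));
    }
}
```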
ModelxRipper.java (new file)
@@ -0,0 +1,64 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class ModelxRipper extends AbstractHTMLRipper {
+
+    public ModelxRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "modelx";
+    }
+
+    @Override
+    public String getDomain() {
+        return "modelx.org";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^.*modelx.org/.*/(.+)$");
+        Matcher m = p.matcher(url.toExternalForm());
+
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException("Expected URL format: http://www.modelx.org/[category (one or more)]/xxxxx got: " + url);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        return Http.url(url).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> result = new ArrayList<>();
+
+        for (Element el : page.select(".gallery-icon > a")) {
+            result.add(el.attr("href"));
+        }
+
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
RedditRipper.java
@@ -4,10 +4,13 @@ import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

+import com.rarchives.ripme.ripper.AbstractRipper;
+import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
 import org.json.JSONArray;
 import org.json.JSONObject;
 import org.json.JSONTokener;
@@ -17,6 +20,9 @@ import com.rarchives.ripme.ui.UpdateUtils;
 import com.rarchives.ripme.utils.Http;
 import com.rarchives.ripme.utils.RipUtils;
 import com.rarchives.ripme.utils.Utils;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;

 public class RedditRipper extends AlbumRipper {

@@ -0,0 +1,80 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class SinfestRipper extends AbstractHTMLRipper {

    public SinfestRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    public String getHost() {
        return "sinfest";
    }

    @Override
    public String getDomain() {
        return "sinfest.net";
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        Pattern p = Pattern.compile("https?://sinfest.net/view.php\\?date=([0-9-]*)/?");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1);
        }
        throw new MalformedURLException("Expected sinfest URL format: " +
                "sinfest.net/view.php?date=XXXX-XX-XX/ - got " + url + " instead");
    }

    @Override
    public Document getFirstPage() throws IOException {
        // "url" is an instance field of the superclass
        return Http.url(url).get();
    }

    @Override
    public Document getNextPage(Document doc) throws IOException {
        Element elem = doc.select("td.style5 > a > img").last();
        // Check for null before dereferencing; the last comic has no "next" arrow
        if (elem == null || elem.parent().attr("href").equals("view.php?date=")) {
            throw new IOException("No more pages");
        }
        logger.info(elem.parent().attr("href"));
        String nextPage = elem.parent().attr("href");
        // Sometimes this returns an empty string; treat that as the last page
        if (nextPage.isEmpty()) {
            return null;
        } else {
            return Http.url("http://sinfest.net/" + nextPage).get();
        }
    }

    @Override
    public List<String> getURLsFromPage(Document doc) {
        List<String> result = new ArrayList<>();
        Element elem = doc.select("tbody > tr > td > img").last();
        result.add("http://sinfest.net/" + elem.attr("src"));
        return result;
    }

    @Override
    public void downloadURL(URL url, int index) {
        addURLToDownload(url, getPrefix(index));
    }
}
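The next-page logic above can be exercised against static markup with jsoup alone; a minimal sketch, where the HTML is a simplified stand-in for sinfest.net's table layout:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class SinfestNextPageDemo {
    public static void main(String[] args) {
        String html = "<table><tr><td class=\"style5\">"
                + "<a href=\"view.php?date=2000-01-18\"><img src=\"next.gif\"></a>"
                + "</td></tr></table>";
        Document doc = Jsoup.parse(html);
        Element elem = doc.select("td.style5 > a > img").last();
        if (elem != null) {
            // prints "view.php?date=2000-01-18"
            System.out.println(elem.parent().attr("href"));
        }
    }
}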
src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java (new file, 112 lines)
@@ -0,0 +1,112 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class StaRipper extends AbstractHTMLRipper {

    public StaRipper(URL url) throws IOException {
        super(url);
    }

    private Map<String,String> cookies = new HashMap<>();

    @Override
    public String getHost() {
        return "sta";
    }

    @Override
    public String getDomain() {
        return "sta.sh";
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        Pattern p = Pattern.compile("https://sta.sh/([A-Za-z0-9]+)");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1);
        }
        throw new MalformedURLException("Expected sta.sh URL format: " +
                "sta.sh/ALBUMID - got " + url + " instead");
    }

    @Override
    public Document getFirstPage() throws IOException {
        // "url" is an instance field of the superclass
        return Http.url(url).get();
    }

    @Override
    public List<String> getURLsFromPage(Document doc) {
        List<String> result = new ArrayList<>();
        for (Element el : doc.select("span > span > a.thumb")) {
            String thumbPageURL = el.attr("href");
            Document thumbPage = null;
            if (checkURL(thumbPageURL)) {
                try {
                    Connection.Response resp = Http.url(new URL(thumbPageURL)).response();
                    cookies.putAll(resp.cookies());
                    thumbPage = resp.parse();
                } catch (MalformedURLException e) {
                    logger.info(thumbPageURL + " is a malformed URL");
                } catch (IOException e) {
                    logger.info(e.getMessage());
                }
                // Guard against a failed fetch above
                if (thumbPage != null) {
                    String imageDownloadUrl = thumbPage.select("a.dev-page-download").attr("href");
                    if (imageDownloadUrl != null && !imageDownloadUrl.equals("")) {
                        result.add(getImageLinkFromDLLink(imageDownloadUrl));
                    }
                }
            }
        }
        return result;
    }

    private boolean checkURL(String url) {
        try {
            new URL(url);
            return true;
        } catch (MalformedURLException e) {
            return false;
        }
    }

    private String getImageLinkFromDLLink(String url) {
        try {
            Connection.Response response = Jsoup.connect(url)
                    .userAgent(USER_AGENT)
                    .timeout(10000)
                    .cookies(cookies)
                    .followRedirects(false)
                    .execute();
            // The download link redirects to the actual image; read it from the Location header
            String imageURL = response.header("Location");
            logger.info(imageURL);
            return imageURL;
        } catch (IOException e) {
            logger.info("Got error message " + e.getMessage() + " trying to download " + url);
            return null;
        }
    }

    @Override
    public void downloadURL(URL url, int index) {
        addURLToDownload(url, getPrefix(index));
    }
}
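The redirect trick in getImageLinkFromDLLink() can be demonstrated on its own: disable redirect following and read the Location header. A minimal sketch; httpbin.org is used here purely as a stand-in endpoint that returns a 302:

import java.io.IOException;

import org.jsoup.Connection;
import org.jsoup.Jsoup;

public class RedirectDemo {
    public static void main(String[] args) throws IOException {
        Connection.Response response = Jsoup.connect("https://httpbin.org/redirect-to?url=https://example.com/image.jpg")
                .followRedirects(false)   // keep the 302 instead of following it
                .ignoreContentType(true)  // the response body is not HTML
                .execute();
        // prints "https://example.com/image.jpg"
        System.out.println(response.header("Location"));
    }
}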
@@ -0,0 +1,108 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ui.RipStatusMessage;
import org.json.JSONArray;
import org.json.JSONObject;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class TsuminoRipper extends AbstractHTMLRipper {
    private Map<String,String> cookies = new HashMap<>();

    public TsuminoRipper(URL url) throws IOException {
        super(url);
    }

    private JSONArray getPageUrls() {
        String postURL = "http://www.tsumino.com/Read/Load";
        try {
            // This sessionId will expire and need to be replaced
            cookies.put("ASP.NET_SessionId","c4rbzccf0dvy3e0cloolmlkq");
            logger.info(cookies);
            Document doc = Jsoup.connect(postURL)
                    .data("q", getAlbumID())
                    .userAgent(USER_AGENT)
                    .cookies(cookies)
                    .referrer("http://www.tsumino.com/Read/View/" + getAlbumID())
                    .post();
            // Strip the html/body wrapper jsoup adds around the raw JSON response
            String jsonInfo = doc.html().replaceAll("<html>","").replaceAll("<head></head>", "")
                    .replaceAll("<body>", "").replaceAll("</body>", "")
                    .replaceAll("</html>", "").replaceAll("\n", "");
            logger.info(jsonInfo);
            JSONObject json = new JSONObject(jsonInfo);
            logger.info(json.getJSONArray("reader_page_urls"));
            return json.getJSONArray("reader_page_urls");
        } catch (IOException e) {
            logger.info(e);
            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED,
                    "Unable to download album, please complete the captcha at http://www.tsumino.com/Read/Auth/"
                    + getAlbumID() + " and try again");
            return null;
        }
    }

    @Override
    public String getHost() {
        return "tsumino";
    }

    @Override
    public String getDomain() {
        return "tsumino.com";
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/([a-zA-Z0-9_-]*)");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1) + "_" + m.group(2);
        }
        throw new MalformedURLException("Expected tsumino URL format: " +
                "tsumino.com/Book/Info/ID/TITLE - got " + url + " instead");
    }

    private String getAlbumID() {
        Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/\\S*");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1);
        }
        return null;
    }

    @Override
    public Document getFirstPage() throws IOException {
        Connection.Response resp = Http.url(url).response();
        cookies.putAll(resp.cookies());
        // Parse once and reuse the Document instead of parsing the body twice
        Document doc = resp.parse();
        logger.info(doc);
        return doc;
    }

    @Override
    public List<String> getURLsFromPage(Document doc) {
        JSONArray imageIds = getPageUrls();
        List<String> result = new ArrayList<>();
        for (int i = 0; i < imageIds.length(); i++) {
            // Note: the single-argument URLEncoder.encode is deprecated and uses the platform charset
            result.add("http://www.tsumino.com/Image/Object?name=" + URLEncoder.encode(imageIds.getString(i)));
        }
        return result;
    }

    @Override
    public void downloadURL(URL url, int index) {
        sleep(1000);
        addURLToDownload(url, getPrefix(index));
    }
}
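A small standalone sketch of the page-URL assembly in getURLsFromPage(), with an invented reader_page_urls payload; the two-argument URLEncoder.encode is used here since the one-argument form is deprecated:

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

import org.json.JSONArray;

public class TsuminoPageUrlDemo {
    public static void main(String[] args) throws UnsupportedEncodingException {
        // Invented payload standing in for the "reader_page_urls" array
        JSONArray imageIds = new JSONArray("[\"pages/1 a.jpg\",\"pages/2 b.jpg\"]");
        for (int i = 0; i < imageIds.length(); i++) {
            String u = "http://www.tsumino.com/Image/Object?name="
                    + URLEncoder.encode(imageIds.getString(i), "UTF-8");
            System.out.println(u); // spaces become '+', slashes become %2F
        }
    }
}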
@@ -37,6 +37,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
     private static List<String> explicit_domains = Arrays.asList(
             "www.totempole666.com",
             "buttsmithy.com",
+            "incase.buttsmithy.com",
             "themonsterunderthebed.net",
             "prismblush.com",
             "www.konradokonski.com",
@@ -87,6 +88,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             return true;
         }
+
+        Pattern buttsmithyIncasePat = Pattern.compile("https?://incase.buttsmithy.com/comic/([a-zA-Z0-9_-]*)/?$");
+        Matcher buttsmithyIncaseMat = buttsmithyIncasePat.matcher(url.toExternalForm());
+        if (buttsmithyIncaseMat.matches()) {
+            return true;
+        }

         Pattern theMonsterUnderTheBedPat = Pattern.compile("https?://themonsterunderthebed.net/\\?comic=([a-zA-Z0-9_-]*)/?$");
         Matcher theMonsterUnderTheBedMat = theMonsterUnderTheBedPat.matcher(url.toExternalForm());
         if (theMonsterUnderTheBedMat.matches()) {
@@ -178,6 +185,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             return getHost() + "_" + prismblushMat.group(1);
         }
+
+        Pattern buttsmithyIncasePat = Pattern.compile("https?://incase.buttsmithy.com/comic/([a-zA-Z0-9_-]*)/?$");
+        Matcher buttsmithyIncaseMat = buttsmithyIncasePat.matcher(url.toExternalForm());
+        if (buttsmithyIncaseMat.matches()) {
+            return getHost() + "_" + buttsmithyIncaseMat.group(1).replaceAll("-page-\\d", "").replaceAll("-pg-\\d", "");
+        }

         Pattern comicsxxxPat = Pattern.compile("https?://comics-xxx.com/([a-zA-Z0-9_\\-]*)/?$");
         Matcher comicsxxxMat = comicsxxxPat.matcher(url.toExternalForm());
         if (comicsxxxMat.matches()) {
@@ -57,19 +57,21 @@ public class XvideosRipper extends VideoRipper {
     public void rip() throws IOException {
         logger.info(" Retrieving " + this.url);
         Document doc = Http.url(this.url).get();
-        Elements embeds = doc.select("embed");
-        if (embeds.size() == 0) {
-            throw new IOException("Could not find Embed code at " + url);
-        }
-        Element embed = embeds.get(0);
-        String vars = embed.attr("flashvars");
-        for (String var : vars.split("&")) {
-            if (var.startsWith("flv_url=")) {
-                String vidUrl = var.substring("flv_url=".length());
-                vidUrl = URLDecoder.decode(vidUrl, "UTF-8");
-                addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url));
+        Elements scripts = doc.select("script");
+        for (Element e : scripts) {
+            if (e.html().contains("html5player.setVideoUrlHigh")) {
+                logger.info("Found the right script");
+                String[] lines = e.html().split("\n");
+                for (String line : lines) {
+                    if (line.contains("html5player.setVideoUrlHigh")) {
+                        String videoURL = line.replaceAll("\t", "").replaceAll("html5player.setVideoUrlHigh\\(", "").replaceAll("'", "").replaceAll("\\);", "");
+                        addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url));
+                        waitForThreads();
+                        return;
+                    }
+                }
             }
         }
-        waitForThreads();
+        throw new IOException("Unable to find video url at " + this.url.toExternalForm());
     }
 }
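The line-scraping in the new rip() can be tried on a canned script line; a minimal sketch, with an invented CDN URL:

public class XvideosParseDemo {
    public static void main(String[] args) {
        String line = "\thtml5player.setVideoUrlHigh('https://cdn.example.com/video-high.mp4');";
        // Same replaceAll chain as the ripper: strip tabs, the call prefix, quotes, and the ");" suffix
        String videoURL = line.replaceAll("\t", "")
                .replaceAll("html5player.setVideoUrlHigh\\(", "")
                .replaceAll("'", "")
                .replaceAll("\\);", "");
        System.out.println(videoURL); // https://cdn.example.com/video-high.mp4
    }
}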
@@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Utils;
 public class UpdateUtils {

     private static final Logger logger = Logger.getLogger(UpdateUtils.class);
-    private static final String DEFAULT_VERSION = "1.7.19";
+    private static final String DEFAULT_VERSION = "1.7.33";
     private static final String REPO_NAME = "ripmeapp/ripme";
     private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
     private static final String mainFileName = "ripme.jar";
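UpdateUtils compares this baked-in DEFAULT_VERSION against the latestVersion field in ripme.json. A hedged sketch of the kind of dotted-version comparison involved (not the project's exact implementation):

public class VersionCompareDemo {
    // Negative if a < b, 0 if equal, positive if a > b
    static int compareVersions(String a, String b) {
        String[] pa = a.split("\\.");
        String[] pb = b.split("\\.");
        int n = Math.max(pa.length, pb.length);
        for (int i = 0; i < n; i++) {
            int va = i < pa.length ? Integer.parseInt(pa[i]) : 0;
            int vb = i < pb.length ? Integer.parseInt(pb[i]) : 0;
            if (va != vb) {
                return Integer.compare(va, vb);
            }
        }
        return 0;
    }

    public static void main(String[] args) {
        System.out.println(compareVersions("1.7.19", "1.7.33") < 0); // true -> update available
    }
}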
@@ -9,19 +9,18 @@ import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

+import com.rarchives.ripme.ripper.AbstractRipper;
+import com.rarchives.ripme.ripper.rippers.EroShareRipper;
+import com.rarchives.ripme.ripper.rippers.EromeRipper;
+import com.rarchives.ripme.ripper.rippers.ImgurRipper;
+import com.rarchives.ripme.ripper.rippers.VidbleRipper;
+import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
 import org.apache.commons.lang.math.NumberUtils;
 import org.apache.log4j.Logger;
 import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;

-import com.rarchives.ripme.ripper.AbstractRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurImage;
-import com.rarchives.ripme.ripper.rippers.VidbleRipper;
-import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
-import com.rarchives.ripme.ripper.rippers.EroShareRipper;

 public class RipUtils {
     private static final Logger logger = Logger.getLogger(RipUtils.class);
@@ -35,8 +34,8 @@ public class RipUtils {
             && url.toExternalForm().contains("imgur.com/a/")) {
             try {
                 logger.debug("Fetching imgur album at " + url);
-                ImgurAlbum imgurAlbum = ImgurRipper.getImgurAlbum(url);
-                for (ImgurImage imgurImage : imgurAlbum.images) {
+                ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurAlbum(url);
+                for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) {
                     logger.debug("Got imgur image: " + imgurImage.url);
                     result.add(imgurImage.url);
                 }
@@ -49,8 +48,8 @@ public class RipUtils {
             // Imgur image series.
             try {
                 logger.debug("Fetching imgur series at " + url);
-                ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url);
-                for (ImgurImage imgurImage : imgurAlbum.images) {
+                ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url);
+                for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) {
                     logger.debug("Got imgur image: " + imgurImage.url);
                     result.add(imgurImage.url);
                 }
@@ -91,6 +90,21 @@ public class RipUtils {
             return result;
         }
+
+        else if (url.toExternalForm().contains("erome.com")) {
+            try {
+                logger.info("Getting erome album " + url);
+                EromeRipper r = new EromeRipper(url);
+                Document tempDoc = r.getFirstPage();
+                for (String u : r.getURLsFromPage(tempDoc)) {
+                    result.add(new URL(u));
+                }
+            } catch (IOException e) {
+                // Log and fall through with whatever was collected
+                logger.warn("Exception while retrieving erome page:", e);
+            }
+            return result;
+        }

         Pattern p = Pattern.compile("https?://i.reddituploads.com/([a-zA-Z0-9]+)\\?.*");
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
@@ -122,8 +136,8 @@ public class RipUtils {
         try {
             // Fetch the page
             Document doc = Jsoup.connect(url.toExternalForm())
                     .userAgent(AbstractRipper.USER_AGENT)
                     .get();
             for (Element el : doc.select("meta")) {
                 if (el.attr("name").equals("twitter:image:src")) {
                     result.add(new URL(el.attr("content")));
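The meta-tag fallback at the end of this hunk is easy to demonstrate offline; a minimal jsoup sketch, with inline HTML standing in for a fetched page:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class MetaImageDemo {
    public static void main(String[] args) {
        String html = "<html><head>"
                + "<meta name=\"twitter:image:src\" content=\"https://example.com/full.jpg\">"
                + "</head><body></body></html>";
        Document doc = Jsoup.parse(html);
        for (Element el : doc.select("meta")) {
            if (el.attr("name").equals("twitter:image:src")) {
                System.out.println(el.attr("content")); // https://example.com/full.jpg
            }
        }
    }
}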
@@ -10,6 +10,9 @@ public class EightmusesRipperTest extends RippersTest {
         // A simple image album
         EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"));
         testRipper(ripper);
+        // Test the new URL format
+        ripper = new EightmusesRipper(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"));
+        testRipper(ripper);
         // Test pages with subalbums
         ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor"));
         testRipper(ripper);
@@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.FemjoyhunterRipper;

public class FemjoyhunterRipperTest extends RippersTest {
    public void testRip() throws IOException {
        FemjoyhunterRipper ripper = new FemjoyhunterRipper(new URL("https://www.femjoyhunter.com/alisa-i-got-nice-big-breasts-and-fine-ass-so-she-seems-to-be-a-hottest-brunette-5936/"));
        testRipper(ripper);
    }
}
@@ -1,13 +1,15 @@
-package com.rarchives.ripme.tst.ripper.rippers;
-
-import java.io.IOException;
-import java.net.URL;
-
-import com.rarchives.ripme.ripper.rippers.FivehundredpxRipper;
-
-public class FivehundredpxRipperTest extends RippersTest {
-    public void test500pxAlbum() throws IOException {
-        FivehundredpxRipper ripper = new FivehundredpxRipper(new URL("https://marketplace.500px.com/alexander_hurman"));
-        testRipper(ripper);
-    }
-}
+//package com.rarchives.ripme.tst.ripper.rippers;
+//
+//import java.io.IOException;
+//import java.net.URL;
+//
+//import com.rarchives.ripme.ripper.rippers.FivehundredpxRipper;
+//
+//public class FivehundredpxRipperTest extends RippersTest {
+//    public void test500pxAlbum() throws IOException {
+//        FivehundredpxRipper ripper = new FivehundredpxRipper(new URL("https://marketplace.500px.com/alexander_hurman"));
+//        testRipper(ripper);
+//    }
+//}
+
+// Ripper is broken. See https://github.com/RipMeApp/ripme/issues/438
@@ -1,13 +1,13 @@
 package com.rarchives.ripme.tst.ripper.rippers;

+import com.rarchives.ripme.ripper.rippers.ImgurRipper;
+import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;
+
 import java.io.IOException;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.List;

-import com.rarchives.ripme.ripper.rippers.ImgurRipper;
-import com.rarchives.ripme.ripper.rippers.ImgurRipper.ImgurAlbum;

 public class ImgurRipperTest extends RippersTest {

     public void testImgurURLFailures() throws IOException {
@@ -17,7 +17,6 @@ public class ImgurRipperTest extends RippersTest {
         failURLs.add(new URL("http://imgur.com/"));
         failURLs.add(new URL("http://i.imgur.com"));
         failURLs.add(new URL("http://i.imgur.com/"));
-        failURLs.add(new URL("http://imgur.com/image"));
         failURLs.add(new URL("http://imgur.com/image.jpg"));
         failURLs.add(new URL("http://i.imgur.com/image.jpg"));
         for (URL url : failURLs) {
@@ -50,6 +49,15 @@ public class ImgurRipperTest extends RippersTest {
         }
     }

+    public void testImgurSingleImage() throws IOException {
+        List<URL> contentURLs = new ArrayList<>();
+        contentURLs.add(new URL("http://imgur.com/qbfcLyG")); // Single image URL
+        contentURLs.add(new URL("https://imgur.com/KexUO")); // Single image URL
+        for (URL url : contentURLs) {
+            ImgurRipper ripper = new ImgurRipper(url);
+            testRipper(ripper);
+        }
+    }
+
     public void testImgurAlbumWithMoreThan20Pictures() throws IOException {
         ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/HUMsq"));
@@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.ModelxRipper;

public class ModelxRipperTest extends RippersTest {
    public void testModelxAlbum() throws IOException {
        ModelxRipper ripper = new ModelxRipper(new URL("http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/"));
        testRipper(ripper);
    }
}
@@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.SinfestRipper;

public class SinfestRipperTest extends RippersTest {
    public void testRip() throws IOException {
        SinfestRipper ripper = new SinfestRipper(new URL("http://sinfest.net/view.php?date=2000-01-17"));
        testRipper(ripper);
    }
}
@@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.StaRipper;

public class StaRipperTest extends RippersTest {
    public void testRip() throws IOException {
        StaRipper ripper = new StaRipper(new URL("https://sta.sh/2hn9rtavr1g"));
        testRipper(ripper);
    }
}
@@ -83,10 +83,10 @@ public class WordpressComicRipperTest extends RippersTest {
                 new URL("http://tnbtu.com/comic/01-00/"));
         testRipper(ripper);
     }
+    // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
-    public void test_pepsaga() throws IOException {
-        WordpressComicRipper ripper = new WordpressComicRipper(
-                new URL("http://shipinbottle.pepsaga.com/?p=281"));
-        testRipper(ripper);
-    }
+    // public void test_pepsaga() throws IOException {
+    //     WordpressComicRipper ripper = new WordpressComicRipper(
+    //             new URL("http://shipinbottle.pepsaga.com/?p=281"));
+    //     testRipper(ripper);
+    // }
 }