commit 43cff6f802

pom.xml
@@ -4,7 +4,7 @@
<groupId>com.rarchives.ripme</groupId>
<artifactId>ripme</artifactId>
<packaging>jar</packaging>
<version>1.7.34</version>
<version>1.7.44</version>
<name>ripme</name>
<url>http://rip.rarchives.com</url>
<properties>

ripme.json
@@ -1,6 +1,16 @@
{
"latestVersion": "1.7.34",
"latestVersion": "1.7.44",
"changeList": [
"1.7.44: Fixed instagram ripper regex",
"1.7.43: Fixed queryId regex in instagram ripper",
"1.7.42: Added user support to SmuttyRipper; Removed vine ripper; Fixed NudeGalsRipper; addURLToDownload improvments; Fixed Instagram ripper",
"1.7.41: Added support for spyingwithlana.com; Added ManganeloRipper; Added support for dynasty-scans.com",
"1.7.40: Added hypnohub.net ripper; Fixed rule34.xxx ripper; Tsumino Ripper now add .png to filenames",
"1.7.39: Added rule34.xxx ripper; Added Gfycatporntube.com ripper; Fixed AbstractRipper subdir bug; Added AbstractRipper unit tests",
"1.7.38: Added http and socks proxy support; Extended some unit tests to include getGid; Added HitomiRipper; hentaifoundry ripper now can rip all images from accounts",
"1.7.37: MInor code clean up; Added socks proxy support; Added support for 8muses.download; Hentaifoundry no longer errors when there are no more pages; Fix bug that causes tumblr to replace https with httpss when downloading resized images",
"1.7.36: Fixed Instagram ripper; Fixed hentai2read ripper test; Fixed tnbtu.com ripper",
"1.7.35: Fixed instagram ripper; hentai2read ripper now properly names folders",
"1.7.34: Added Blackbrickroadofoz Ripper; Fixed webtoons regex",
"1.7.33: Instagram ripper no longer errors out when downloading from more than 1 page",
"1.7.32: Instagram ripper update to use new enpoints",
@@ -27,6 +27,7 @@ import com.rarchives.ripme.ui.History;
import com.rarchives.ripme.ui.HistoryEntry;
import com.rarchives.ripme.ui.MainWindow;
import com.rarchives.ripme.ui.UpdateUtils;
import com.rarchives.ripme.utils.Proxy;
import com.rarchives.ripme.utils.RipUtils;
import com.rarchives.ripme.utils.Utils;

@@ -47,6 +48,12 @@ public class App {
System.exit(0);
}

if (Utils.getConfigString("proxy.http", null) != null) {
Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", null));
} else if (Utils.getConfigString("proxy.socks", null) != null) {
Proxy.setSocks(Utils.getConfigString("proxy.socks", null));
}

if (GraphicsEnvironment.isHeadless() || args.length > 0) {
handleArguments(args);
} else {
@@ -95,6 +102,16 @@ public class App {
Utils.setConfigBoolean("file.overwrite", true);
}

if (cl.hasOption('s')) {
String sservfull = cl.getOptionValue('s').trim();
Proxy.setSocks(sservfull);
}

if (cl.hasOption('p')) {
String proxyserverfull = cl.getOptionValue('p').trim();
Proxy.setHTTPProxy(proxyserverfull);
}

if (cl.hasOption('t')) {
Utils.setConfigInteger("threads.size", Integer.parseInt(cl.getOptionValue('t')));
}
@@ -195,6 +212,7 @@ public class App {
String url = cl.getOptionValue('u').trim();
ripURL(url, cl.hasOption("n"));
}

}

/**
@@ -242,6 +260,8 @@ public class App {
opts.addOption("n", "no-prop-file", false, "Do not create properties file.");
opts.addOption("f", "urls-file", true, "Rip URLs from a file.");
opts.addOption("v", "version", false, "Show current version");
opts.addOption("s", "socks-server", true, "Use socks server ([user:password]@host[:port])");
opts.addOption("p", "proxy-server", true, "Use HTTP Proxy server ([user:password]@host[:port])");
return opts;
}
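Note: the new -s/-p flags and the proxy.http/proxy.socks config keys above all funnel into the same two static setters. A minimal sketch of that wiring, with placeholder endpoints that are not part of the commit:

import com.rarchives.ripme.utils.Proxy;

public class ProxyWiringExample {
    public static void main(String[] args) {
        // Equivalent of "-p user:pass@proxy.example.com:8080" or proxy.http in rip.properties
        Proxy.setHTTPProxy("user:pass@proxy.example.com:8080");
        // Equivalent of "-s 127.0.0.1:9050" or proxy.socks; App itself applies only one of
        // the two, checking proxy.http before proxy.socks
        Proxy.setSocks("127.0.0.1:9050");
    }
}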
@@ -192,7 +192,8 @@ public abstract class AbstractRipper
* True if downloaded successfully
* False if failed to download
*/
protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String, String> cookies);
protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String, String> cookies,
Boolean getFileExtFromMIME);

/**
* Queues image to be downloaded and saved.
@@ -212,7 +213,7 @@ public abstract class AbstractRipper
* True if downloaded successfully
* False if failed to download
*/
protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName) {
protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName, String extension, Boolean getFileExtFromMIME) {
// Don't re-add the url if it was downloaded in a previous rip
if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
if (hasDownloadedURL(url.toExternalForm())) {
@@ -228,21 +229,7 @@ public abstract class AbstractRipper
return false;
}
logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
String saveAs;
if (fileName != null) {
saveAs = fileName;
// Get the extension of the file
String extension = url.toExternalForm().substring(url.toExternalForm().lastIndexOf(".") + 1);
saveAs = saveAs + "." + extension;
} else {
saveAs = url.toExternalForm();
saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
}

if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
String saveAs = getFileName(url, fileName, extension);
File saveFileAs;
try {
if (!subdirectory.equals("")) {
@@ -271,7 +258,15 @@ public abstract class AbstractRipper
logger.debug("Unable to write URL history file");
}
}
return addURLToDownload(url, saveFileAs, referrer, cookies);
return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME);
}

protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies, String fileName, String extension) {
return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, extension, false);
}

protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName) {
return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, null);
}

/**
@@ -306,6 +301,35 @@ public abstract class AbstractRipper
return addURLToDownload(url, prefix, "");
}

public static String getFileName(URL url, String fileName, String extension) {
String saveAs;
if (fileName != null) {
saveAs = fileName;
} else {
saveAs = url.toExternalForm();
saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
}
if (extension == null) {
// Get the extension of the file
String[] lastBitOfURL = url.toExternalForm().split("/");

String[] lastBit = lastBitOfURL[lastBitOfURL.length - 1].split(".");
if (lastBit.length != 0) {
extension = lastBit[lastBit.length - 1];
saveAs = saveAs + "." + extension;
}
}

if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
if (extension != null) {
saveAs = saveAs + "." + extension;
}
return saveAs;
}


/**
* Waits for downloading threads to complete.
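The rewritten overload above delegates name construction to the new static getFileName() helper. A small illustrative sketch of its expected behavior follows, using a placeholder URL; note that String.split(".") treats the dot as a regex, so when no explicit extension is passed the guessing branch yields an empty array and appends nothing extra:

import java.net.URL;
import com.rarchives.ripme.ripper.AbstractRipper;

public class GetFileNameExample {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://example.com/gallery/photo.jpg?size=large");

        // No explicit name or extension: falls back to the last path segment
        // and strips the query string; the regex split skips the extension guess.
        System.out.println(AbstractRipper.getFileName(url, null, null));     // photo.jpg

        // Explicit name and extension supplied by a ripper:
        System.out.println(AbstractRipper.getFileName(url, "cover", "png")); // cover.png
    }
}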
@@ -50,7 +50,7 @@ public abstract class AlbumRipper extends AbstractRipper {
/**
* Queues multiple URLs of single images to download from a single Album URL
*/
public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
// Only download one file if this is a test.
if (super.isThisATest() &&
(itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) {
@@ -82,7 +82,7 @@ public abstract class AlbumRipper extends AbstractRipper {
}
else {
itemsPending.put(url, saveAs);
DownloadFileThread dft = new DownloadFileThread(url, saveAs, this);
DownloadFileThread dft = new DownloadFileThread(url, saveAs, this, getFileExtFromMIME);
if (referrer != null) {
dft.setReferrer(referrer);
}
@@ -96,7 +96,7 @@ public abstract class AlbumRipper extends AbstractRipper {

@Override
public boolean addURLToDownload(URL url, File saveAs) {
return addURLToDownload(url, saveAs, null, null);
return addURLToDownload(url, saveAs, null, null, false);
}

/**
@@ -8,6 +8,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.util.HashMap;
import java.util.Map;

@@ -36,10 +37,11 @@ class DownloadFileThread extends Thread {
private String prettySaveAs;
private AbstractRipper observer;
private int retries;
private Boolean getFileExtFromMIME;

private final int TIMEOUT;

public DownloadFileThread(URL url, File saveAs, AbstractRipper observer) {
public DownloadFileThread(URL url, File saveAs, AbstractRipper observer, Boolean getFileExtFromMIME) {
super();
this.url = url;
this.saveAs = saveAs;
@@ -47,6 +49,7 @@ class DownloadFileThread extends Thread {
this.observer = observer;
this.retries = Utils.getConfigInteger("download.retries", 1);
this.TIMEOUT = Utils.getConfigInteger("download.timeout", 60000);
this.getFileExtFromMIME = getFileExtFromMIME;
}

public void setReferrer(String referrer) {
@@ -143,9 +146,15 @@ class DownloadFileThread extends Thread {
observer.downloadErrored(url, "Imgur image is 404: " + url.toExternalForm());
return;
}

// Save file
bis = new BufferedInputStream(huc.getInputStream());

// Check if we should get the file ext from the MIME type
if (getFileExtFromMIME) {
String fileExt = URLConnection.guessContentTypeFromStream(bis).replaceAll("image/", "");
saveAs = new File(saveAs.toString() + "." + fileExt);
}

fos = new FileOutputStream(saveAs);
IOUtils.copy(bis, fos);
break; // Download successful: break out of infinite loop
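The MIME check above relies on URLConnection.guessContentTypeFromStream(), which peeks at the leading bytes of a mark/reset-capable stream (BufferedInputStream qualifies) and returns null for content it does not recognize, in which case the replaceAll call above would raise a NullPointerException. A standalone sketch with a placeholder URL:

import java.io.BufferedInputStream;
import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;

public class MimeExtensionExample {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://example.com/image-without-extension");
        try (InputStream in = new BufferedInputStream(url.openStream())) {
            // Reads a few bytes, then resets the stream before the real download
            String contentType = URLConnection.guessContentTypeFromStream(in);
            // Guard against unrecognized content before deriving "jpeg", "png", ...
            String fileExt = (contentType != null) ? contentType.replaceAll("image/", "") : "bin";
            System.out.println("Guessed extension: " + fileExt);
        }
    }
}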
@@ -10,6 +10,7 @@ import java.util.Map;
import com.rarchives.ripme.ui.RipStatusMessage;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Utils;
import com.sun.org.apache.xpath.internal.operations.Bool;

public abstract class VideoRipper extends AbstractRipper {

@@ -70,7 +71,7 @@ public abstract class VideoRipper extends AbstractRipper {
}

@Override
public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
return addURLToDownload(url, saveAs);
}

@@ -55,14 +55,8 @@ public class BlackbrickroadofozRipper extends AbstractHTMLRipper {
throw new IOException("No more pages");
}
String nextPage = elem.attr("href");
// Some times this returns a empty string
// This for stops that
if (nextPage == "") {
throw new IOException("No more pages");
}
else {
return Http.url(nextPage).get();
}

}

@Override
@@ -59,7 +59,7 @@ public class CfakeRipper extends AbstractHTMLRipper {
String nextPage = elem.attr("href");
// Some times this returns a empty string
// This for stops that
if (nextPage == "") {
if (nextPage.equals("")) {
return null;
}
else {
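Several hunks in this commit replace checks like nextPage == "" with nextPage.equals(""). A short sketch of why that matters: == compares object references, so an empty string built by the HTML parser does not compare equal to the literal "":

public class StringEqualityExample {
    public static void main(String[] args) {
        String fromParser = new String("");          // e.g. an empty attr("href") result
        System.out.println(fromParser == "");        // false: different references
        System.out.println(fromParser.equals(""));   // true: compares contents
        System.out.println(fromParser.isEmpty());    // equivalent and a bit clearer
    }
}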
@ -0,0 +1,84 @@
|
||||
package com.rarchives.ripme.ripper.rippers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.json.JSONArray;
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
public class DynastyscansRipper extends AbstractHTMLRipper {
|
||||
|
||||
public DynastyscansRipper(URL url) throws IOException {
|
||||
super(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return "dynasty-scans";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDomain() {
|
||||
return "dynasty-scans.com";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("https?://dynasty-scans.com/chapters/([\\S]+)/?$");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
}
|
||||
throw new MalformedURLException("Expected dynasty-scans URL format: " +
|
||||
"dynasty-scans.com/chapters/ID - got " + url + " instead");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
// "url" is an instance field of the superclass
|
||||
return Http.url(url).get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getNextPage(Document doc) throws IOException {
|
||||
Element elem = doc.select("a[id=next_link]").first();
|
||||
if (elem == null || elem.attr("href").equals("#")) {
|
||||
throw new IOException("No more pages");
|
||||
}
|
||||
return Http.url("https://dynasty-scans.com" + elem.attr("href")).get();
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> result = new ArrayList<>();
|
||||
String jsonText = null;
|
||||
for (Element script : doc.select("script")) {
|
||||
if (script.data().contains("var pages")) {
|
||||
jsonText = script.data().replaceAll("var pages = ", "");
|
||||
jsonText = jsonText.replaceAll("//<!\\[CDATA\\[", "");
|
||||
jsonText = jsonText.replaceAll("//]]>", "");
|
||||
}
|
||||
}
|
||||
JSONArray imageArray = new JSONArray(jsonText);
|
||||
for (int i = 0; i < imageArray.length(); i++) {
|
||||
result.add("https://dynasty-scans.com" + imageArray.getJSONObject(i).getString("image"));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
}
|
@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -86,7 +86,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
throw new IOException("No more pages");
}
nextUrl = elem.attr("href");
if (nextUrl == "") {
if (nextUrl.equals("")) {
throw new IOException("No more pages");
}
return Http.url("eroshae.com" + nextUrl).get();
@@ -332,7 +332,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
String[] fields = u.split("/");
String prefix = getPrefix(index) + fields[fields.length - 3];
File saveAs = new File(getWorkingDir() + File.separator + prefix + ".jpg");
addURLToDownload(url, saveAs, "", null);
addURLToDownload(url, saveAs, "", null, false);
}

}
@ -13,7 +13,6 @@ import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import com.rarchives.ripme.utils.Utils;
|
||||
import org.jsoup.Connection.Method;
|
||||
import org.jsoup.Connection.Response;
|
||||
import org.jsoup.Jsoup;
|
||||
import org.jsoup.nodes.Document;
|
||||
@ -23,7 +22,6 @@ import org.jsoup.select.Elements;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.ripper.DownloadThreadPool;
|
||||
import com.rarchives.ripme.utils.Base64;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
public class FuraffinityRipper extends AbstractHTMLRipper {
|
||||
@ -162,10 +160,6 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
|
||||
if (!subdirectory.equals("")) {
|
||||
subdirectory = File.separator + subdirectory;
|
||||
}
|
||||
int o = url.toString().lastIndexOf('/')-1;
|
||||
String test = url.toString().substring(url.toString().lastIndexOf('/',o)+1);
|
||||
test = test.replace("/",""); // This is probably not the best way to do this.
|
||||
test = test.replace("\\",""); // CLOSE ENOUGH!
|
||||
saveFileAs = new File(
|
||||
workingDir.getCanonicalPath()
|
||||
+ subdirectory
|
||||
|
@ -0,0 +1,61 @@
|
||||
package com.rarchives.ripme.ripper.rippers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
public class GfycatporntubeRipper extends AbstractHTMLRipper {
|
||||
|
||||
public GfycatporntubeRipper(URL url) throws IOException {
|
||||
super(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return "gfycatporntube";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDomain() {
|
||||
return "gfycatporntube.com";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("https?://gfycatporntube.com/([a-zA-Z1-9_-]*)/?$");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
}
|
||||
throw new MalformedURLException("Expected gfycatporntube URL format: " +
|
||||
"gfycatporntube.com/NAME - got " + url + " instead");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
// "url" is an instance field of the superclass
|
||||
return Http.url(url).get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> result = new ArrayList<>();
|
||||
result.add(doc.select("source[id=mp4Source]").attr("src"));
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
}
|
@ -33,7 +33,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("https://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
|
||||
Pattern p = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/(\\d)?/?");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
@ -63,9 +63,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
|
||||
@Override
|
||||
public String getAlbumTitle(URL url) throws MalformedURLException {
|
||||
try {
|
||||
Document doc = getFirstPage();
|
||||
String title = doc.select("span[itemprop=title]").text();
|
||||
return getHost() + "_" + title;
|
||||
return getHost() + "_" + getGID(url);
|
||||
} catch (Exception e) {
|
||||
// Fall back to default album naming convention
|
||||
logger.warn("Failed to get album title from " + url, e);
|
||||
|
@ -10,6 +10,7 @@ import java.util.Map;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.jsoup.Connection.Method;
|
||||
import org.jsoup.Connection.Response;
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
@ -49,15 +50,57 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
Response resp = Http.url("http://www.hentai-foundry.com/").response();
|
||||
cookies = resp.cookies();
|
||||
Response resp;
|
||||
Document doc;
|
||||
|
||||
resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
|
||||
.referrer("http://www.hentai-foundry.com/")
|
||||
.cookies(cookies)
|
||||
.response();
|
||||
// The only cookie that seems to matter in getting around the age wall is the phpsession cookie
|
||||
cookies.putAll(resp.cookies());
|
||||
sleep(500);
|
||||
|
||||
doc = resp.parse();
|
||||
String csrf_token = doc.select("input[name=YII_CSRF_TOKEN]")
|
||||
.first().attr("value");
|
||||
if (csrf_token != null) {
|
||||
Map<String,String> data = new HashMap<>();
|
||||
data.put("YII_CSRF_TOKEN" , csrf_token);
|
||||
data.put("rating_nudity" , "3");
|
||||
data.put("rating_violence" , "3");
|
||||
data.put("rating_profanity", "3");
|
||||
data.put("rating_racism" , "3");
|
||||
data.put("rating_sex" , "3");
|
||||
data.put("rating_spoilers" , "3");
|
||||
data.put("rating_yaoi" , "1");
|
||||
data.put("rating_yuri" , "1");
|
||||
data.put("rating_teen" , "1");
|
||||
data.put("rating_guro" , "1");
|
||||
data.put("rating_furry" , "1");
|
||||
data.put("rating_beast" , "1");
|
||||
data.put("rating_male" , "1");
|
||||
data.put("rating_female" , "1");
|
||||
data.put("rating_futa" , "1");
|
||||
data.put("rating_other" , "1");
|
||||
data.put("rating_scat" , "1");
|
||||
data.put("rating_incest" , "1");
|
||||
data.put("rating_rape" , "1");
|
||||
data.put("filter_media" , "A");
|
||||
data.put("filter_order" , "date_new");
|
||||
data.put("filter_type" , "0");
|
||||
|
||||
resp = Http.url("http://www.hentai-foundry.com/site/filters")
|
||||
.referrer("http://www.hentai-foundry.com/")
|
||||
.cookies(cookies)
|
||||
.data(data)
|
||||
.method(Method.POST)
|
||||
.response();
|
||||
cookies.putAll(resp.cookies());
|
||||
}
|
||||
else {
|
||||
logger.info("unable to find csrf_token and set filter");
|
||||
}
|
||||
|
||||
resp = Http.url(url)
|
||||
.referrer("http://www.hentai-foundry.com/")
|
||||
.cookies(cookies)
|
||||
@ -74,12 +117,16 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
|
||||
}
|
||||
Elements els = doc.select("li.next > a");
|
||||
Element first = els.first();
|
||||
try {
|
||||
String nextURL = first.attr("href");
|
||||
nextURL = "http://www.hentai-foundry.com" + nextURL;
|
||||
return Http.url(nextURL)
|
||||
.referrer(url)
|
||||
.cookies(cookies)
|
||||
.get();
|
||||
} catch (NullPointerException e) {
|
||||
throw new IOException("No more pages");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -97,13 +144,6 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
|
||||
}
|
||||
Document imagePage;
|
||||
try {
|
||||
Response resp = Http.url("http://www.hentai-foundry.com/").response();
|
||||
cookies = resp.cookies();
|
||||
resp = Http.url("http://www.hentai-foundry.com/?enterAgree=1&size=1500")
|
||||
.referrer("http://www.hentai-foundry.com/")
|
||||
.cookies(cookies)
|
||||
.response();
|
||||
cookies.putAll(resp.cookies());
|
||||
|
||||
logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
|
||||
imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
|
||||
|
@ -0,0 +1,73 @@
|
||||
package com.rarchives.ripme.ripper.rippers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONObject;
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
public class HitomiRipper extends AbstractHTMLRipper {
|
||||
|
||||
String galleryId = "";
|
||||
|
||||
public HitomiRipper(URL url) throws IOException {
|
||||
super(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return "hitomi";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDomain() {
|
||||
return "hitomi.la";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("https://hitomi.la/galleries/([\\d]+).html");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
galleryId = m.group(1);
|
||||
return m.group(1);
|
||||
}
|
||||
throw new MalformedURLException("Expected hitomi URL format: " +
|
||||
"https://hitomi.la/galleries/ID.html - got " + url + " instead");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
// if we go to /GALLERYID.js we get a nice json array of all images in the gallery
|
||||
return Http.url(new URL(url.toExternalForm().replaceAll(".html", ".js"))).ignoreContentType().get();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> result = new ArrayList<>();
|
||||
String json = doc.text().replaceAll("var galleryinfo =", "");
|
||||
logger.info(json);
|
||||
JSONArray json_data = new JSONArray(json);
|
||||
for (int i = 0; i < json_data.length(); i++) {
|
||||
result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
}
|
@ -0,0 +1,91 @@
|
||||
package com.rarchives.ripme.ripper.rippers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
import javax.print.Doc;
|
||||
|
||||
public class HypnohubRipper extends AbstractHTMLRipper {
|
||||
|
||||
public HypnohubRipper(URL url) throws IOException {
|
||||
super(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return "hypnohub";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDomain() {
|
||||
return "hypnohub.net";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/?$");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
}
|
||||
p = Pattern.compile("https?://hypnohub.net/\\S+/show/([\\d]+)/([\\S]+)/?$");
|
||||
m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1) + "_" + m.group(2);
|
||||
}
|
||||
throw new MalformedURLException("Expected cfake URL format: " +
|
||||
"hypnohub.net/pool/show/ID - got " + url + " instead");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
// "url" is an instance field of the superclass
|
||||
return Http.url(url).get();
|
||||
}
|
||||
|
||||
private String ripPost(String url) throws IOException {
|
||||
logger.info(url);
|
||||
Document doc = Http.url(url).get();
|
||||
return "https:" + doc.select("img.image").attr("src");
|
||||
|
||||
}
|
||||
|
||||
private String ripPost(Document doc) {
|
||||
logger.info(url);
|
||||
return "https:" + doc.select("img.image").attr("src");
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> result = new ArrayList<>();
|
||||
if (url.toExternalForm().contains("/pool")) {
|
||||
for (Element el : doc.select("ul[id=post-list-posts] > li > div > a.thumb")) {
|
||||
try {
|
||||
result.add(ripPost("https://hypnohub.net" + el.attr("href")));
|
||||
} catch (IOException e) {
|
||||
return result;
|
||||
}
|
||||
}
|
||||
} else if (url.toExternalForm().contains("/post")) {
|
||||
result.add(ripPost(doc));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
}
|
@@ -328,7 +328,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
}

private String getIGGis(String variables) {
String stringToMD5 = rhx_gis + ":" + csrftoken + ":" + variables;
String stringToMD5 = rhx_gis + ":" + variables;
logger.debug("String to md5 is \"" + stringToMD5 + "\"");
try {
byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
@@ -374,7 +374,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
try {
// Sleep for a while to avoid a ban
sleep(2500);
String vars = "{\"id\":\"" + userID + "\",\"first\":100,\"after\":\"" + nextPageID + "\"}";
String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
String ig_gis = getIGGis(vars);
logger.info(ig_gis);
toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars
@@ -435,6 +435,12 @@ public class InstagramRipper extends AbstractHTMLRipper {
if (m.find()) {
return m.group(1);
}
jsP = Pattern.compile("n.pagination:n},queryId:.([a-zA-Z0-9]+).");
m = jsP.matcher(sb.toString());
if (m.find()) {
return m.group(1);
}

} else {
Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
Matcher m = jsP.matcher(sb.toString());
@@ -442,7 +448,7 @@ public class InstagramRipper extends AbstractHTMLRipper {
return m.group(1);
}
}
logger.info("Could not find query_hash on " + jsFileURL);
logger.error("Could not find query_hash on " + jsFileURL);
return null;

}
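getIGGis() above now signs only rhx_gis plus the request variables. A hedged sketch of the MD5-hex step the surrounding try block presumably completes; the rhx_gis and variables values here are placeholders:

import java.math.BigInteger;
import java.security.MessageDigest;

public class IgGisExample {
    public static void main(String[] args) throws Exception {
        String rhxGis = "placeholder_rhx_gis";
        String variables = "{\"id\":\"1\",\"first\":50,\"after\":\"\"}";
        String stringToMD5 = rhxGis + ":" + variables;   // same concatenation as getIGGis()

        byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
        byte[] digest = MessageDigest.getInstance("MD5").digest(bytesOfMessage);
        // Zero-padded lowercase hex, the usual form of the X-Instagram-GIS header value
        String igGis = String.format("%032x", new BigInteger(1, digest));
        System.out.println(igGis);
    }
}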
@@ -50,7 +50,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
// This is here for pages with mp4s instead of images
String video_image = "";
video_image = page.select("div > video > source").attr("src");
if (video_image != "") {
if (!video_image.equals("")) {
urls.add(video_image);
}
return urls;
@ -0,0 +1,116 @@
|
||||
package com.rarchives.ripme.ripper.rippers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
import javax.print.Doc;
|
||||
|
||||
public class ManganeloRipper extends AbstractHTMLRipper {
|
||||
|
||||
public ManganeloRipper(URL url) throws IOException {
|
||||
super(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return "manganelo";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDomain() {
|
||||
return "manganelo.com";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("https?://manganelo.com/manga/([\\S]+)/?$");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
}
|
||||
|
||||
p = Pattern.compile("http://manganelo.com/chapter/([\\S]+)/([\\S]+)/?$");
|
||||
m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
}
|
||||
throw new MalformedURLException("Expected manganelo URL format: " +
|
||||
"/manganelo.com/manga/ID - got " + url + " instead");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
// "url" is an instance field of the superclass
|
||||
return Http.url(url).get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getNextPage(Document doc) throws IOException {
|
||||
Element elem = doc.select("div.btn-navigation-chap > a.back").first();
|
||||
if (elem == null) {
|
||||
throw new IOException("No more pages");
|
||||
} else {
|
||||
return Http.url(elem.attr("href")).get();
|
||||
}
|
||||
}
|
||||
|
||||
private List<String> getURLsFromChap(String url) {
|
||||
logger.debug("Getting urls from " + url);
|
||||
List<String> result = new ArrayList<>();
|
||||
try {
|
||||
Document doc = Http.url(url).get();
|
||||
for (Element el : doc.select("img.img_content")) {
|
||||
result.add(el.attr("src"));
|
||||
}
|
||||
return result;
|
||||
} catch (IOException e) {
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private List<String> getURLsFromChap(Document doc) {
|
||||
logger.debug("Getting urls from " + url);
|
||||
List<String> result = new ArrayList<>();
|
||||
for (Element el : doc.select("img.img_content")) {
|
||||
result.add(el.attr("src"));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> result = new ArrayList<>();
|
||||
List<String> urlsToGrab = new ArrayList<>();
|
||||
if (url.toExternalForm().contains("/manga/")) {
|
||||
for (Element el : doc.select("div.chapter-list > div.row > span > a")) {
|
||||
urlsToGrab.add(el.attr("href"));
|
||||
}
|
||||
Collections.reverse(urlsToGrab);
|
||||
|
||||
for (String url : urlsToGrab) {
|
||||
result.addAll(getURLsFromChap(url));
|
||||
}
|
||||
} else if (url.toExternalForm().contains("/chapter/")) {
|
||||
result.addAll(getURLsFromChap(doc));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
}
|
@ -73,7 +73,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
|
||||
if (m.matches()) {
|
||||
nextUrl = "http://myhentaicomics.com" + m.group(0);
|
||||
}
|
||||
if (nextUrl == "") {
|
||||
if (nextUrl.equals("")) {
|
||||
throw new IOException("No more pages");
|
||||
}
|
||||
// Sleep for half a sec to avoid getting IP banned
|
||||
@ -100,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
|
||||
Element elem = nextAlbumPage.select("a.ui-icon-right").first();
|
||||
String nextPage = elem.attr("href");
|
||||
pageNumber = pageNumber + 1;
|
||||
if (nextPage == "") {
|
||||
if (nextPage.equals("")) {
|
||||
logger.info("Got " + pageNumber + " pages");
|
||||
break;
|
||||
}
|
||||
@ -220,7 +220,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
|
||||
// If true the page is a page of albums
|
||||
if (doc.toString().contains("class=\"g-item g-album\"")) {
|
||||
// This if checks that there is more than 1 page
|
||||
if (doc.select("a.ui-icon-right").last().attr("href") != "") {
|
||||
if (!doc.select("a.ui-icon-right").last().attr("href").equals("")) {
|
||||
// There is more than one page so we call getListOfPages
|
||||
List<String> pagesToRip = getListOfPages(doc);
|
||||
logger.debug("Pages to rip = " + pagesToRip);
|
||||
|
@ -33,23 +33,6 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
|
||||
return "nude-gals.com";
|
||||
}
|
||||
|
||||
public String getAlbumTitle(URL url) throws MalformedURLException {
|
||||
try {
|
||||
Document doc = getFirstPage();
|
||||
Elements elems = doc.select("#left_col > #grid_title > .right");
|
||||
|
||||
String girl = elems.get(3).text();
|
||||
String magazine = elems.get(2).text();
|
||||
String title = elems.get(0).text();
|
||||
|
||||
return getHost() + "_" + girl + "-" + magazine + "-" + title;
|
||||
} catch (Exception e) {
|
||||
// Fall back to default album naming convention
|
||||
logger.warn("Failed to get album title from " + url, e);
|
||||
}
|
||||
return super.getAlbumTitle(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p;
|
||||
@ -79,9 +62,9 @@ public class NudeGalsRipper extends AbstractHTMLRipper {
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> imageURLs = new ArrayList<>();
|
||||
|
||||
Elements thumbs = doc.select("#grid_container .grid > .grid_box");
|
||||
Elements thumbs = doc.select("img.thumbnail");
|
||||
for (Element thumb : thumbs) {
|
||||
String link = thumb.select("a").get(1).attr("href");
|
||||
String link = thumb.attr("src").replaceAll("thumbs/th_", "");
|
||||
String imgSrc = "http://nude-gals.com/" + link;
|
||||
imageURLs.add(imgSrc);
|
||||
}
|
||||
|
@ -0,0 +1,94 @@
|
||||
package com.rarchives.ripme.ripper.rippers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
public class Rule34Ripper extends AbstractHTMLRipper {
|
||||
|
||||
public Rule34Ripper(URL url) throws IOException {
|
||||
super(url);
|
||||
}
|
||||
|
||||
private String apiUrl;
|
||||
private int pageNumber = 0;
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return "rule34";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDomain() {
|
||||
return "rule34.xxx";
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canRip(URL url){
|
||||
Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
}
|
||||
throw new MalformedURLException("Expected rule34.xxx URL format: " +
|
||||
"rule34.xxx/index.php?page=post&s=list&tags=TAG - got " + url + " instead");
|
||||
}
|
||||
|
||||
public URL getAPIUrl() throws MalformedURLException {
|
||||
URL urlToReturn = new URL("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url));
|
||||
return urlToReturn;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
apiUrl = getAPIUrl().toExternalForm();
|
||||
// "url" is an instance field of the superclass
|
||||
return Http.url(getAPIUrl()).get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getNextPage(Document doc) throws IOException {
|
||||
if (doc.html().contains("Search error: API limited due to abuse")) {
|
||||
throw new IOException("No more pages");
|
||||
}
|
||||
pageNumber += 1;
|
||||
String nextPage = apiUrl + "&pid=" + pageNumber;
|
||||
return Http.url(nextPage).get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> result = new ArrayList<>();
|
||||
for (Element el : doc.select("posts > post")) {
|
||||
String imageSource = el.select("post").attr("file_url");
|
||||
result.add(imageSource);
|
||||
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
}
|
@@ -57,7 +57,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
String nextPage = elem.parent().attr("href");
// Some times this returns a empty string
// This for stops that
if (nextPage == "") {
if (nextPage.equals("")) {
return null;
}
else {
@ -3,24 +3,19 @@ package com.rarchives.ripme.ripper.rippers;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import com.rarchives.ripme.ripper.AlbumRipper;
|
||||
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
/**
|
||||
* Appears to be broken as of 2015-02-11.
|
||||
* Generating large image from thumbnail requires replacing "/m/" with something else:
|
||||
* -> Sometimes "/b/"
|
||||
* -> Sometimes "/p/"
|
||||
* No way to know without loading the image page.
|
||||
*/
|
||||
public class SmuttyRipper extends AlbumRipper {
|
||||
|
||||
public class SmuttyRipper extends AbstractHTMLRipper {
|
||||
|
||||
private static final String DOMAIN = "smutty.com",
|
||||
HOST = "smutty";
|
||||
@ -29,6 +24,16 @@ public class SmuttyRipper extends AlbumRipper {
|
||||
super(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return "smutty";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDomain() {
|
||||
return "smutty.com";
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canRip(URL url) {
|
||||
return (url.getHost().endsWith(DOMAIN));
|
||||
@ -40,31 +45,8 @@ public class SmuttyRipper extends AlbumRipper {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rip() throws IOException {
|
||||
int page = 0;
|
||||
String url, tag = getGID(this.url);
|
||||
boolean hasNextPage = true;
|
||||
while (hasNextPage) {
|
||||
if (isStopped()) {
|
||||
break;
|
||||
}
|
||||
page++;
|
||||
url = "http://smutty.com/h/" + tag + "/?q=%23" + tag + "&page=" + page + "&sort=date&lazy=1";
|
||||
this.sendUpdate(STATUS.LOADING_RESOURCE, url);
|
||||
logger.info(" Retrieving " + url);
|
||||
Document doc;
|
||||
try {
|
||||
doc = Http.url(url)
|
||||
.ignoreContentType()
|
||||
.get();
|
||||
} catch (IOException e) {
|
||||
if (e.toString().contains("Status=404")) {
|
||||
logger.info("No more pages to load");
|
||||
} else {
|
||||
logger.warn("Exception while loading " + url, e);
|
||||
}
|
||||
break;
|
||||
}
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> results = new ArrayList<>();
|
||||
for (Element image : doc.select("a.l > img")) {
|
||||
if (isStopped()) {
|
||||
break;
|
||||
@ -84,25 +66,36 @@ public class SmuttyRipper extends AlbumRipper {
|
||||
}
|
||||
}
|
||||
imageUrl = sb.toString();
|
||||
addURLToDownload(new URL("http:" + imageUrl));
|
||||
results.add("http:" + imageUrl);
|
||||
}
|
||||
if (doc.select("#next").size() == 0) {
|
||||
break; // No more pages
|
||||
}
|
||||
// Wait before loading next page
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException e) {
|
||||
logger.error("[!] Interrupted while waiting to load next album:", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
waitForThreads();
|
||||
return results;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return HOST;
|
||||
public Document getNextPage(Document doc) throws IOException {
|
||||
Element elem = doc.select("a.next").first();
|
||||
if (elem == null) {
|
||||
throw new IOException("No more pages");
|
||||
}
|
||||
String nextPage = elem.attr("href");
|
||||
// Some times this returns a empty string
|
||||
// This for stops that
|
||||
if (nextPage.equals("")) {
|
||||
throw new IOException("No more pages");
|
||||
}
|
||||
else {
|
||||
return Http.url("https://smutty.com" + nextPage).get();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Document getFirstPage() throws IOException {
|
||||
// "url" is an instance field of the superclass
|
||||
return Http.url(url).get();
|
||||
}
|
||||
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -117,6 +110,12 @@ public class SmuttyRipper extends AlbumRipper {
|
||||
if (m.matches()) {
|
||||
return m.group(1).replace("%23", "");
|
||||
}
|
||||
|
||||
p = Pattern.compile("^https?://smutty.com/user/([a-zA-Z0-9\\-_]+)/?$");
|
||||
m = p.matcher(url.toExternalForm());
|
||||
if (m.matches()) {
|
||||
return m.group(1);
|
||||
}
|
||||
throw new MalformedURLException("Expected tag in URL (smutty.com/h/tag and not " + url);
|
||||
}
|
||||
|
||||
|
@ -17,7 +17,6 @@ import org.json.JSONObject;
|
||||
import org.jsoup.Connection;
|
||||
import org.jsoup.Jsoup;
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
@ -35,13 +34,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
|
||||
try {
|
||||
// This sessionId will expire and need to be replaced
|
||||
cookies.put("ASP.NET_SessionId","c4rbzccf0dvy3e0cloolmlkq");
|
||||
logger.info(cookies);
|
||||
Document doc = Jsoup.connect(postURL).data("q", getAlbumID()).userAgent(USER_AGENT).cookies(cookies).referrer("http://www.tsumino.com/Read/View/" + getAlbumID()).post();
|
||||
String jsonInfo = doc.html().replaceAll("<html>","").replaceAll("<head></head>", "").replaceAll("<body>", "").replaceAll("</body>", "")
|
||||
.replaceAll("</html>", "").replaceAll("\n", "");
|
||||
logger.info(jsonInfo);
|
||||
JSONObject json = new JSONObject(jsonInfo);
|
||||
logger.info(json.getJSONArray("reader_page_urls"));
|
||||
return json.getJSONArray("reader_page_urls");
|
||||
} catch (IOException e) {
|
||||
logger.info(e);
|
||||
@ -85,7 +81,6 @@ public class TsuminoRipper extends AbstractHTMLRipper {
|
||||
public Document getFirstPage() throws IOException {
|
||||
Connection.Response resp = Http.url(url).response();
|
||||
cookies.putAll(resp.cookies());
|
||||
logger.info(resp.parse());
|
||||
return resp.parse();
|
||||
}
|
||||
|
||||
@ -103,6 +98,10 @@ public class TsuminoRipper extends AbstractHTMLRipper {
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
sleep(1000);
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
/*
|
||||
There is no way to tell if an image returned from tsumino.com is a png to jpg. The content-type header is always
|
||||
"image/jpeg" even when the image is a png. The file ext is not included in the url.
|
||||
*/
|
||||
addURLToDownload(url, getPrefix(index), "", null, null, null, null, true);
|
||||
}
|
||||
}
|
||||
|
@@ -230,7 +230,7 @@ public class TumblrRipper extends AlbumRipper {
urlString = urlString.replaceAll("_\\d+\\.", "_raw.");
fileURL = new URL(urlString);
} else {
fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http", "https"));
fileURL = new URL(photo.getJSONObject("original_size").getString("url").replaceAll("http:", "https:"));
}
m = p.matcher(fileURL.toString());
if (m.matches()) {
@ -4,14 +4,12 @@ import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
import org.jsoup.select.Elements;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
@ -1,95 +0,0 @@
|
||||
package com.rarchives.ripme.ripper.rippers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONObject;
|
||||
import org.jsoup.HttpStatusException;
|
||||
|
||||
import com.rarchives.ripme.ripper.AlbumRipper;
|
||||
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
|
||||
import com.rarchives.ripme.utils.Http;
|
||||
|
||||
public class VineRipper extends AlbumRipper {
|
||||
|
||||
private static final String DOMAIN = "vine.co",
|
||||
HOST = "vine";
|
||||
|
||||
public VineRipper(URL url) throws IOException {
|
||||
super(url);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canRip(URL url) {
|
||||
return url.getHost().endsWith(DOMAIN);
|
||||
}
|
||||
|
||||
@Override
|
||||
public URL sanitizeURL(URL url) throws MalformedURLException {
|
||||
return new URL("http://vine.co/u/" + getGID(url));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rip() throws IOException {
|
||||
int page = 0;
|
||||
String baseURL = "https://vine.co/api/timelines/users/" + getGID(this.url);
|
||||
JSONObject json = null;
|
||||
while (true) {
|
||||
page++;
|
||||
String theURL = baseURL;
|
||||
if (page > 1) {
|
||||
theURL += "?page=" + page;
|
||||
}
|
||||
try {
|
||||
logger.info(" Retrieving " + theURL);
|
||||
sendUpdate(STATUS.LOADING_RESOURCE, theURL);
|
||||
json = Http.url(theURL).getJSON();
|
||||
} catch (HttpStatusException e) {
|
||||
logger.debug("Hit end of pages at page " + page, e);
|
||||
break;
|
||||
}
|
||||
JSONArray records = json.getJSONObject("data").getJSONArray("records");
|
||||
for (int i = 0; i < records.length(); i++) {
|
||||
String videoURL = records.getJSONObject(i).getString("videoUrl");
|
||||
addURLToDownload(new URL(videoURL));
|
||||
if (isThisATest()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (isThisATest()) {
|
||||
break;
|
||||
}
|
||||
if (records.length() == 0) {
|
||||
logger.info("Zero records returned");
|
||||
break;
|
||||
}
|
||||
try {
|
||||
Thread.sleep(2000);
|
||||
} catch (InterruptedException e) {
|
||||
logger.error("[!] Interrupted while waiting to load next page", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
waitForThreads();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHost() {
|
||||
return HOST;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getGID(URL url) throws MalformedURLException {
|
||||
Pattern p = Pattern.compile("^https?://(www\\.)?vine\\.co/u/([0-9]+).*$");
|
||||
Matcher m = p.matcher(url.toExternalForm());
|
||||
if (!m.matches()) {
|
||||
throw new MalformedURLException("Expected format: http://vine.co/u/######");
|
||||
}
|
||||
return m.group(m.groupCount());
|
||||
}
|
||||
|
||||
}
|
@ -44,7 +44,20 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|
||||
"freeadultcomix.com",
|
||||
"thisis.delvecomic.com",
|
||||
"tnbtu.com",
|
||||
"shipinbottle.pepsaga.com"
|
||||
"shipinbottle.pepsaga.com",
|
||||
"8muses.download",
|
||||
"spyingwithlana.com"
|
||||
);
|
||||
|
||||
private static List<String> theme1 = Arrays.asList(
|
||||
"www.totempole666.com",
|
||||
"buttsmithy.com",
|
||||
"themonsterunderthebed.net",
|
||||
"prismblush.com",
|
||||
"www.konradokonski.com",
|
||||
"thisis.delvecomic.com",
|
||||
"tnbtu.com",
|
||||
"spyingwithlana.com"
|
||||
);
|
||||
|
||||
@Override
|
||||
@ -135,6 +148,18 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|
||||
if (shipinbottleMat.matches()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
|
||||
Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
|
||||
if (eight_musesMat.matches()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
|
||||
Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
|
||||
if (spyingwithlanaMat.matches()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -209,6 +234,18 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|
||||
return getHost() + "_" + "Ship_in_bottle";
|
||||
}
|
||||
|
||||
Pattern eight_musesPat = Pattern.compile("https?://8muses.download/([a-zA-Z0-9_-]+)/?$");
|
||||
Matcher eight_musesMat = eight_musesPat.matcher(url.toExternalForm());
|
||||
if (eight_musesMat.matches()) {
|
||||
return getHost() + "_" + eight_musesMat.group(1);
|
||||
}
|
||||
|
||||
Pattern spyingwithlanaPat = Pattern.compile("https?://spyingwithlana.com/comic/([a-zA-Z0-9_-]+)/?$");
|
||||
Matcher spyingwithlanaMat = spyingwithlanaPat.matcher(url.toExternalForm());
|
||||
if (spyingwithlanaMat.matches()) {
|
||||
return "spyingwithlana_" + spyingwithlanaMat.group(1).replaceAll("-page-\\d", "");
|
||||
}
|
||||
|
||||
return super.getAlbumTitle(url);
|
||||
}
|
||||
|
||||
@ -227,13 +264,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|
||||
// Find next page
|
||||
String nextPage = "";
|
||||
Element elem = null;
|
||||
if (getHost().contains("www.totempole666.com")
|
||||
|| getHost().contains("buttsmithy.com")
|
||||
|| getHost().contains("themonsterunderthebed.net")
|
||||
|| getHost().contains("prismblush.com")
|
||||
|| getHost().contains("www.konradokonski.com")
|
||||
|| getHost().contains("thisis.delvecomic.com")
|
||||
|| getHost().contains("tnbtu.com")) {
|
||||
if (theme1.contains(getHost())) {
|
||||
elem = doc.select("a.comic-nav-next").first();
|
||||
if (elem == null) {
|
||||
throw new IOException("No more pages");
|
||||
@ -247,7 +278,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|
||||
nextPage = elem.attr("href");
|
||||
}
|
||||
|
||||
if (nextPage == "") {
|
||||
if (nextPage.equals("")) {
|
||||
throw new IOException("No more pages");
|
||||
} else {
|
||||
return Http.url(nextPage).get();
|
||||
@ -257,13 +288,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
    @Override
    public List<String> getURLsFromPage(Document doc) {
        List<String> result = new ArrayList<>();
        if (getHost().contains("www.totempole666.com")
            || getHost().contains("buttsmithy.com")
            || getHost().contains("themonsterunderthebed.net")
            || getHost().contains("prismblush.com")
            || getHost().contains("www.konradokonski.com")
            || getHost().contains("thisis.delvecomic.com")
            || getHost().contains("tnbtu.com")) {
        if (theme1.contains(getHost())) {
            Element elem = doc.select("div.comic-table > div#comic > a > img").first();
            // If doc is the last page in the comic then elem.attr("src") returns null
            // because there is no link <a> to the next page
@ -315,6 +340,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
            }
        }

        if (url.toExternalForm().contains("8muses.download")) {
            for (Element elem : doc.select("div.popup-gallery > figure > a")) {
                result.add(elem.attr("href"));
            }
        }

        return result;
    }

@ -327,10 +358,16 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
            || getHost().contains("themonsterunderthebed.net")) {
            addURLToDownload(url, pageTitle + "_");
        }
        if (getHost().contains("tnbtu.com")) {
            // We need to set the referrer header for tnbtu
            addURLToDownload(url, getPrefix(index), "", "http://www.tnbtu.com/comic", null);
        } else {
            // If we're ripping a site where we can't get the page number/title we just rip normally
            addURLToDownload(url, getPrefix(index));
        }

    }

    @Override
    public Document getFirstPage() throws IOException {
        // "url" is an instance field of the superclass

@ -72,7 +72,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {

    @Override
    public Document getNextPage(Document doc) throws IOException {
        if (doc.select("a.next").first().attr("href") != "") {
        if (!doc.select("a.next").first().attr("href").equals("")) {
            return Http.url(doc.select("a.next").first().attr("href")).get();
        } else {
            throw new IOException("No more pages");

@ -3,7 +3,6 @@ package com.rarchives.ripme.ripper.rippers.video;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.regex.Matcher;
import java.util.regex.Pattern;


@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Utils;
public class UpdateUtils {

    private static final Logger logger = Logger.getLogger(UpdateUtils.class);
    private static final String DEFAULT_VERSION = "1.7.34";
    private static final String DEFAULT_VERSION = "1.7.44";
    private static final String REPO_NAME = "ripmeapp/ripme";
    private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
    private static final String mainFileName = "ripme.jar";

99
src/main/java/com/rarchives/ripme/utils/Proxy.java
Normal file
99
src/main/java/com/rarchives/ripme/utils/Proxy.java
Normal file
@ -0,0 +1,99 @@
package com.rarchives.ripme.utils;

import java.net.Authenticator;
import java.net.PasswordAuthentication;
import java.util.Map;
import java.util.HashMap;

/**
 * Proxy/Socks setter
 */
public class Proxy {
    private Proxy() {
    }

    /**
     * Parse the proxy server settings from string, using the format
     * [user:password]@host[:port].
     *
     * @param fullproxy the string to parse
     * @return HashMap containing proxy server, port, user and password
     */
    private static Map<String, String> parseServer(String fullproxy) {
        Map<String, String> proxy = new HashMap<String, String>();

        if (fullproxy.lastIndexOf("@") != -1) {
            int sservli = fullproxy.lastIndexOf("@");
            String userpw = fullproxy.substring(0, sservli);
            String[] usersplit = userpw.split(":");
            proxy.put("user", usersplit[0]);
            proxy.put("password", usersplit[1]);
            fullproxy = fullproxy.substring(sservli + 1);
        }
        String[] servsplit = fullproxy.split(":");
        if (servsplit.length == 2) {
            proxy.put("port", servsplit[1]);
        }
        proxy.put("server", servsplit[0]);
        return proxy;
    }

    /**
     * Set a HTTP Proxy.
     * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless
     * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java
     * see https://stackoverflow.com/q/41505219
     *
     * @param fullproxy the proxy, using format [user:password]@host[:port]
     */
    public static void setHTTPProxy(String fullproxy) {
        Map<String, String> proxyServer = parseServer(fullproxy);

        if (proxyServer.get("user") != null && proxyServer.get("password") != null) {
            Authenticator.setDefault(new Authenticator() {
                protected PasswordAuthentication getPasswordAuthentication() {
                    PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray());
                    return p;
                }
            });
            System.setProperty("http.proxyUser", proxyServer.get("user"));
            System.setProperty("http.proxyPassword", proxyServer.get("password"));
            System.setProperty("https.proxyUser", proxyServer.get("user"));
            System.setProperty("https.proxyPassword", proxyServer.get("password"));
        }

        if (proxyServer.get("port") != null) {
            System.setProperty("http.proxyPort", proxyServer.get("port"));
            System.setProperty("https.proxyPort", proxyServer.get("port"));
        }

        System.setProperty("http.proxyHost", proxyServer.get("server"));
        System.setProperty("https.proxyHost", proxyServer.get("server"));
    }

    /**
     * Set a Socks Proxy Server (globally).
     *
     * @param fullsocks the socks server, using format [user:password]@host[:port]
     */
    public static void setSocks(String fullsocks) {

        Map<String, String> socksServer = parseServer(fullsocks);
        if (socksServer.get("user") != null && socksServer.get("password") != null) {
            Authenticator.setDefault(new Authenticator() {
                protected PasswordAuthentication getPasswordAuthentication() {
                    PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray());
                    return p;
                }
            });
            System.setProperty("java.net.socks.username", socksServer.get("user"));
            System.setProperty("java.net.socks.password", socksServer.get("password"));
        }
        if (socksServer.get("port") != null) {
            System.setProperty("socksProxyPort", socksServer.get("port"));
        }

        System.setProperty("socksProxyHost", socksServer.get("server"));
    }

}
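A short usage sketch of the new helper; the host, port and credentials below are made-up placeholders, and the listed properties simply restate what setHTTPProxy/setSocks assign above:

    Proxy.setHTTPProxy("alice:hunter2@127.0.0.1:8080");
    // -> http.proxyHost / https.proxyHost = 127.0.0.1, http.proxyPort / https.proxyPort = 8080,
    //    http(s).proxyUser and proxyPassword set, plus a default Authenticator for the credentials

    Proxy.setSocks("127.0.0.1:9050");   // the user:password@ prefix is optional
    // -> socksProxyHost = 127.0.0.1, socksProxyPort = 9050
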
@ -0,0 +1,30 @@
package com.rarchives.ripme.tst;

import com.rarchives.ripme.ripper.AbstractRipper;
import junit.framework.TestCase;

import java.io.IOException;
import java.net.URL;


public class AbstractRipperTest extends TestCase {

    public void testGetFileName() throws IOException {
        String fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", "test");
        assertEquals("test.test", fileName);

        fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", null);
        assertEquals("test", fileName);

        fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), null, null);
        assertEquals("Object", fileName);

        fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file.png"), null, null);
        assertEquals("file.png", fileName);

        fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file."), null, null);
        assertEquals("file.", fileName);
    }

}
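Read together, the assertions above pin down how getFileName is expected to resolve a name (a summary inferred from the test, not taken from the implementation):

    // getFileName(url, "test", "test") -> "test.test"   explicit name plus extension win
    // getFileName(url, "test", null)   -> "test"        explicit name, no extension appended
    // getFileName(url, null, null)     -> "Object" / "file.png"  fallback to the last URL path segment
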
52
src/test/java/com/rarchives/ripme/tst/proxyTest.java
Normal file
52
src/test/java/com/rarchives/ripme/tst/proxyTest.java
Normal file
@ -0,0 +1,52 @@
package com.rarchives.ripme.tst;

import java.io.IOException;
import java.net.URL;
import com.rarchives.ripme.utils.Proxy;
import com.rarchives.ripme.utils.Utils;
import junit.framework.TestCase;
import com.rarchives.ripme.utils.Http;


public class proxyTest extends TestCase {


    // This test will only run on machines where the user has added an entry for proxy.socks
    public void testSocksProxy() throws IOException {
        // Unset proxy before testing
        System.setProperty("http.proxyHost", "");
        System.setProperty("https.proxyHost", "");
        System.setProperty("socksProxyHost", "");
        URL url = new URL("https://icanhazip.com");
        String proxyConfig = Utils.getConfigString("proxy.socks", "");
        if (!proxyConfig.equals("")) {
            String ip1 = Http.url(url).ignoreContentType().get().text();
            Proxy.setSocks(Utils.getConfigString("proxy.socks", ""));
            String ip2 = Http.url(url).ignoreContentType().get().text();
            assertFalse(ip1.equals(ip2));
        } else {
            System.out.println("Skipping testSocksProxy");
            assert(true);
        }
    }

    // This test will only run on machines where the user has added an entry for proxy.http
    public void testHTTPProxy() throws IOException {
        // Unset proxy before testing
        System.setProperty("http.proxyHost", "");
        System.setProperty("https.proxyHost", "");
        System.setProperty("socksProxyHost", "");
        URL url = new URL("https://icanhazip.com");
        String proxyConfig = Utils.getConfigString("proxy.http", "");
        if (!proxyConfig.equals("")) {
            String ip1 = Http.url(url).ignoreContentType().get().text();
            Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", ""));
            String ip2 = Http.url(url).ignoreContentType().get().text();
            assertFalse(ip1.equals(ip2));
        } else {
            System.out.println("Skipping testHTTPProxy");
            assert(true);
        }
    }

}
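Both tests are effectively opt-in: they only exercise the proxy path when a matching entry exists in the config read by Utils.getConfigString. A sample entry, assuming the usual rip.properties config file and placeholder hosts and ports:

    # rip.properties (illustrative values only)
    proxy.http  = user:password@127.0.0.1:8080
    proxy.socks = 127.0.0.1:9050
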
@ -3,7 +3,7 @@ package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;;
import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;

public class AerisdiesRipperTest extends RippersTest {
    public void testAlbum() throws IOException {

@ -0,0 +1,18 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.DynastyscansRipper;

public class DynastyscansRipperTest extends RippersTest {
    public void testRip() throws IOException {
        DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
        testRipper(ripper);
    }

    public void testGetGID() throws IOException {
        DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"));
        assertEquals("under_one_roof_ch01", ripper.getGID(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")));
    }
}

@ -17,4 +17,9 @@ public class EightmusesRipperTest extends RippersTest {
        ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor"));
        testRipper(ripper);
    }

    public void testGID() throws IOException {
        EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"));
        assertEquals("Affect3D-Comics", ripper.getGID(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")));
    }
}
@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.GfycatporntubeRipper;

public class GfycatporntubeRipperTest extends RippersTest {
    public void testRip() throws IOException {
        GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/"));
        testRipper(ripper);
    }
}

@ -9,5 +9,7 @@ public class Hentai2readRipperTest extends RippersTest {
    public void testHentai2readAlbum() throws IOException {
        Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/"));
        testRipper(ripper);
        ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/1/"));
        testRipper(ripper);
    }
}

@ -0,0 +1,14 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.HitomiRipper;

public class HitomiRipperTest extends RippersTest {
    public void testRip() throws IOException {
        HitomiRipper ripper = new HitomiRipper(new URL("https://hitomi.la/galleries/975973.html"));
        testRipper(ripper);
        assertTrue(ripper.getGID(new URL("https://hitomi.la/galleries/975973.html")).equals("975973"));
    }
}
@ -0,0 +1,25 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.HypnohubRipper;

public class HypnohubRipperTest extends RippersTest {
    public void testRip() throws IOException {
        URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
        URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
        HypnohubRipper ripper = new HypnohubRipper(poolURL);
        testRipper(ripper);
        ripper = new HypnohubRipper(postURL);
        testRipper(ripper);
    }
    public void testGetGID() throws IOException {
        URL poolURL = new URL("http://hypnohub.net/pool/show/2303");
        HypnohubRipper ripper = new HypnohubRipper(poolURL);
        assertEquals("2303", ripper.getGID(poolURL));

        URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-");
        assertEquals("63464_black_hair-bracelet-collar-corruption-female_only-", ripper.getGID(postURL));
    }
}

@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.ManganeloRipper;

public class ManganeloRipperTest extends RippersTest {
    public void testRip() throws IOException {
        ManganeloRipper ripper = new ManganeloRipper(new URL("http://manganelo.com/manga/black_clover"));
        testRipper(ripper);
    }
}
@ -10,4 +10,9 @@ public class ModelmayhemRipperTest extends RippersTest {
        ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
        testRipper(ripper);
    }

    public void testGetGID() throws IOException {
        ModelmayhemRipper ripper = new ModelmayhemRipper(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"));
        assertEquals("520206", ripper.getGID(new URL("https://www.modelmayhem.com/portfolio/520206/viewall")));
    }
}

@ -10,4 +10,15 @@ public class MyhentaicomicsRipperTest extends RippersTest {
        MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales"));
        testRipper(ripper);
    }

    public void testGetGID() throws IOException {
        URL url = new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales");
        MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url);
        // Test a comic
        assertEquals("Nienna-Lost-Tales", ripper.getGID(url));
        // Test a search
        assertEquals("test", ripper.getGID(new URL("http://myhentaicomics.com/index.php/search?q=test")));
        // Test a tag
        assertEquals("2409", ripper.getGID(new URL("http://myhentaicomics.com/index.php/tag/2409/")));
    }
}
@ -0,0 +1,18 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.NudeGalsRipper;

public class NudeGalsRipperTest extends RippersTest {
    public void testRip() throws IOException {
        NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
        testRipper(ripper);
    }

    public void testGetGID() throws IOException {
        NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"));
        assertEquals("5541", ripper.getGID(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")));
    }
}

@ -0,0 +1,14 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.Rule34Ripper;

public class Rule34RipperTest extends RippersTest {
    public void testShesFreakyRip() throws IOException {
        Rule34Ripper ripper = new Rule34Ripper(new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo"));
        testRipper(ripper);
    }

}

@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.SmuttyRipper;

public class SmuttyRipperTest extends RippersTest {
    public void testRip() throws IOException {
        SmuttyRipper ripper = new SmuttyRipper(new URL("https://smutty.com/user/QUIGON/"));
        testRipper(ripper);
    }
}
@ -1,16 +0,0 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.VineRipper;

public class VineRipperTest extends RippersTest {
    // https://github.com/RipMeApp/ripme/issues/181
    /*
    public void testVineRip() throws IOException {
        VineRipper ripper = new VineRipper(new URL("https://vine.co/u/954440445776334848"));
        testRipper(ripper);
    }
    */
}
@ -55,6 +55,7 @@ public class WordpressComicRipperTest extends RippersTest {
        WordpressComicRipper ripper = new WordpressComicRipper(
                new URL("http://www.konradokonski.com/sawdust/comic/get-up/"));
        testRipper(ripper);

    }

    public void test_konradokonski_2() throws IOException {
@ -63,6 +64,13 @@ public class WordpressComicRipperTest extends RippersTest {
        testRipper(ripper);
    }

    public void test_konradokonski_getAlbumTitle() throws IOException {
        URL url = new URL("http://www.konradokonski.com/sawdust/comic/get-up/");
        WordpressComicRipper ripper = new WordpressComicRipper(url);
        assertEquals("konradokonski.com_sawdust", ripper.getAlbumTitle(url));

    }

    /*
    // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
    public void test_freeadultcomix() throws IOException {
@ -83,6 +91,32 @@ public class WordpressComicRipperTest extends RippersTest {
                new URL("http://tnbtu.com/comic/01-00/"));
        testRipper(ripper);
    }

    public void test_Eightmuses_download() throws IOException {
        WordpressComicRipper ripper = new WordpressComicRipper(
                new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/"));
        testRipper(ripper);
    }

    public void test_Eightmuses_getAlbumTitle() throws IOException {
        URL url = new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/");
        WordpressComicRipper ripper = new WordpressComicRipper(url);
        assertEquals("8muses.download_lustomic-playkittens-josh-samuel-porn-comics-8-muses",
                ripper.getAlbumTitle(url));
    }

    public void test_spyingwithlana_download() throws IOException {
        WordpressComicRipper ripper = new WordpressComicRipper(
                new URL("http://spyingwithlana.com/comic/the-big-hookup/"));
        testRipper(ripper);
    }

    public void test_spyingwithlana_getAlbumTitle() throws IOException {
        URL url = new URL("http://spyingwithlana.com/comic/the-big-hookup/");
        WordpressComicRipper ripper = new WordpressComicRipper(url);
        assertEquals("spyingwithlana_the-big-hookup", ripper.getAlbumTitle(url));
    }

    // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI
    // public void test_pepsaga() throws IOException {
    //     WordpressComicRipper ripper = new WordpressComicRipper(