Merge pull request #116 from EraYaN/master
Made ChanRipper more universal.
Commit: f82c1f56ef
nbactions.xml (new file, 17 lines)
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<actions>
+    <action>
+        <actionName>run</actionName>
+        <packagings>
+            <packaging>jar</packaging>
+        </packagings>
+        <goals>
+            <goal>process-classes</goal>
+            <goal>org.codehaus.mojo:exec-maven-plugin:1.2.1:exec</goal>
+        </goals>
+        <properties>
+            <exec.args>-classpath %classpath com.rarchives.ripme.App</exec.args>
+            <exec.executable>java</exec.executable>
+        </properties>
+    </action>
+</actions>
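Note: this NetBeans action file wires the IDE's "Run" action to Maven's process-classes phase followed by exec:exec. Given the exec.executable and exec.args above, the resulting invocation amounts to roughly the following (assuming standard exec-maven-plugin behavior, with %classpath expanded to the project classpath):

    java -classpath <project classpath> com.rarchives.ripme.App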
AbstractRipper.java
@@ -94,14 +94,14 @@ public abstract class AbstractRipper
      * @param saveAs
      *      Path of the local file to save the content to.
      */
-    public abstract void addURLToDownload(URL url, File saveAs);
-    public abstract void addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies);
+    public abstract boolean addURLToDownload(URL url, File saveAs);
+    public abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies);

-    public void addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies) {
+    public boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies) {
         try {
             stopCheck();
         } catch (IOException e) {
-            return;
+            return false;
         }
         String saveAs = url.toExternalForm();
         saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
@@ -122,14 +122,14 @@ public abstract class AbstractRipper
                     + saveAs);
         } catch (IOException e) {
             logger.error("[!] Error creating save file path for URL '" + url + "':", e);
-            return;
+            return false;
         }
         logger.debug("Downloading " + url + " to " + saveFileAs);
         if (!saveFileAs.getParentFile().exists()) {
             logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
             saveFileAs.getParentFile().mkdirs();
         }
-        addURLToDownload(url, saveFileAs, referrer, cookies);
+        return addURLToDownload(url, saveFileAs, referrer, cookies);
     }

     /**
@@ -141,8 +141,8 @@ public abstract class AbstractRipper
      * @param subdirectory
      *      Sub-directory of the working directory to save the images to.
      */
-    public void addURLToDownload(URL url, String prefix, String subdirectory) {
-        addURLToDownload(url, prefix, subdirectory, null, null);
+    public boolean addURLToDownload(URL url, String prefix, String subdirectory) {
+        return addURLToDownload(url, prefix, subdirectory, null, null);
     }

     /**
@@ -153,9 +153,9 @@ public abstract class AbstractRipper
      * @param prefix
      *      Text to append to saved filename.
      */
-    public void addURLToDownload(URL url, String prefix) {
+    public boolean addURLToDownload(URL url, String prefix) {
         // Use empty subdirectory
-        addURLToDownload(url, prefix, "");
+        return addURLToDownload(url, prefix, "");
     }
     /**
      * Waits for downloading threads to complete.
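Note: the void-to-boolean change threads a success flag through every addURLToDownload overload, so callers can react when nothing was actually queued. A minimal caller-side sketch, assuming a `ripper` variable and a list of page URLs (both illustrative, not code from this PR):

    for (URL pageUrl : pageUrls) {
        if (!ripper.addURLToDownload(pageUrl)) {
            break; // refused: duplicate, rip stopped, or save-path error
        }
    }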
AlbumRipper.java
@@ -33,14 +33,14 @@ public abstract class AlbumRipper extends AbstractRipper {
         return false;
     }

-    public void addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
+    public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
         if (!allowDuplicates()
                 && ( itemsPending.containsKey(url)
                   || itemsCompleted.containsKey(url)
                   || itemsErrored.containsKey(url) )) {
             // Item is already downloaded/downloading, skip it.
             logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
-            return;
+            return false;
         }
         if (Utils.getConfigBoolean("urls_only.save", false)) {
             // Output URL to file
@@ -68,11 +68,12 @@ public abstract class AlbumRipper extends AbstractRipper {
             }
             threadPool.addThread(dft);
         }
+        return true;
     }

     @Override
-    public void addURLToDownload(URL url, File saveAs) {
-        addURLToDownload(url, saveAs, null, null);
+    public boolean addURLToDownload(URL url, File saveAs) {
+        return addURLToDownload(url, saveAs, null, null);
     }

     /**
|
|||||||
* Uses filename from URL to decide filename.
|
* Uses filename from URL to decide filename.
|
||||||
* @param url
|
* @param url
|
||||||
* URL to download
|
* URL to download
|
||||||
|
* @return
|
||||||
|
* True on success
|
||||||
*/
|
*/
|
||||||
public void addURLToDownload(URL url) {
|
public boolean addURLToDownload(URL url) {
|
||||||
// Use empty prefix and empty subdirectory
|
// Use empty prefix and empty subdirectory
|
||||||
addURLToDownload(url, "", "");
|
return addURLToDownload(url, "", "");
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@@ -146,6 +149,8 @@ public abstract class AlbumRipper extends AbstractRipper {
      * Sets directory to save all ripped files to.
      * @param url
      *      URL to define how the working directory should be saved.
+     * @throws
+     *      IOException
      */
     @Override
     public void setWorkingDir(URL url) throws IOException {
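Note: with this change a repeated URL is reported back to the caller instead of being silently dropped. A quick sketch of the observable behavior, assuming an AlbumRipper subclass instance `ripper` and a hypothetical URL (both illustrative):

    URL u = new URL("http://example.com/img/1.jpg");
    ripper.addURLToDownload(u);  // true: queued for download
    ripper.addURLToDownload(u);  // false: already pending, skipped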
ChanRipper.java
@@ -12,12 +12,48 @@ import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;

 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
 import com.rarchives.ripme.utils.Http;
+import java.util.Arrays;

 public class ChanRipper extends AbstractHTMLRipper {

+    //ArrayList<String> explicit_domains = new ArrayList<String>();
+    public static List<ChanSite> explicit_domains = Arrays.asList(
+            //Tested (main boards)
+            //Untested (main boards)
+            new ChanSite(Arrays.asList("anon-ib.com")),
+            new ChanSite(Arrays.asList("boards.4chan.org"), Arrays.asList("4cdn.org")),
+            //Tested (archives)
+            new ChanSite(Arrays.asList("archive.moe"), Arrays.asList("data.archive.moe")), //4chan archive (successor of foolz archive) Archives: [ a / biz / c / co / diy / gd / i / int / jp / m / mlp / out / po / q / s4s / sci / sp / tg / tv / v / vg / vp / vr / wsg ]
+            //Untested (archives)
+            new ChanSite(Arrays.asList("4archive.org"), Arrays.asList("imgur.com")), //4chan archive (on demand)
+            new ChanSite(Arrays.asList("archive.4plebs.org"), Arrays.asList("img.4plebs.org")), //4chan archive Archives: [ adv / f / hr / o / pol / s4s / tg / trv / tv / x ] Boards: [ plebs ]
+            new ChanSite(Arrays.asList("fgts.jp"), Arrays.asList("dat.fgts.jp")) //4chan archive Archives: [ asp / cm / h / hc / hm / n / p / r / s / soc / y ]
+    );
+    public static List<String> url_piece_blacklist = Arrays.asList(
+            "=http",
+            "http://imgops.com/",
+            "iqdb.org",
+            "saucenao.com"
+    );
+
+    public ChanSite chanSite;
+    public Boolean generalChanSite = true;
+
     public ChanRipper(URL url) throws IOException {
         super(url);
+        for (ChanSite _chanSite : explicit_domains) {
+            for (String host : _chanSite.domains) {
+                if (url.getHost().equals(host)) {
+                    chanSite = _chanSite;
+                    generalChanSite = false;
+                }
+            }
+        }
+        if (chanSite == null) {
+            chanSite = new ChanSite(Arrays.asList(url.getHost()));
+        }
     }

     @Override
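Note: each ChanSite pairs the board domains with the CDN domains that host the actual media, so the constructor picks the right profile up front and unknown hosts fall back to a generic one-domain profile. A quick sketch of both paths, using URLs that appear in the tests below:

    ChanRipper known = new ChanRipper(new URL("http://boards.4chan.org/hr/thread/2214511"));
    // known.chanSite.cdnDomains == ["4cdn.org"], known.generalChanSite == false
    ChanRipper generic = new ChanRipper(new URL("http://desuchan.net/v/res/7034.html"));
    // no explicit entry: chanSite == ChanSite(["desuchan.net"]), generalChanSite == true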
@@ -34,38 +70,39 @@ public class ChanRipper extends AbstractHTMLRipper {

     @Override
     public boolean canRip(URL url) {
-        // TODO Whitelist?
-        if (url.getHost().equals("anon-ib.com")) {
-            return true;
-        }
-        return url.getHost().contains("chan") &&
-                ( url.toExternalForm().contains("/res/") // Most chans
-               || url.toExternalForm().contains("/thread/")); // 4chan
+        //explicit_domains testing
+        for (ChanSite _chanSite : explicit_domains) {
+            for (String host : _chanSite.domains) {
+                if (url.getHost().equals(host)) {
+                    return true;
+                }
+            }
+        }
+        //It'll fail further down the road.
+        return url.toExternalForm().contains("/res/") // Most chans
+            || url.toExternalForm().contains("/thread/"); // 4chan, archive.moe
     }
+    /**
+     * For example the archives are all known. (Check 4chan-x)
+     * Should be based on the software the specific chan uses;
+     * FoolFuuka uses the same (url) layout as 4chan.
+     * */
     @Override
     public String getGID(URL url) throws MalformedURLException {
         Pattern p; Matcher m;

         String u = url.toExternalForm();
-        if (u.contains("/res/")) {
-            p = Pattern.compile("^.*(chan|anon-ib).*\\.[a-z]{2,3}/[a-zA-Z0-9/]+/res/([0-9]+)(\\.html|\\.php)?.*$");
+        if (u.contains("/thread/")||u.contains("/res/")) {
+            p = Pattern.compile("^.*\\.[a-z]{1,3}/[a-zA-Z0-9]+/(thread|res)/([0-9]+)(\\.html|\\.php)?.*$");
             m = p.matcher(u);
             if (m.matches()) {
                 return m.group(2);
             }
         }
-        else if (u.contains("/thread/")) {
-            p = Pattern.compile("^.*chan.*\\.[a-z]{2,3}/[a-zA-Z0-9]+/thread/([0-9]+)(\\.html|\\.php)?.*$");
-            m = p.matcher(u);
-            if (m.matches()) {
-                return m.group(1);
-            }
-        }

         throw new MalformedURLException(
                 "Expected *chan URL formats: "
-                + "*chan.com/@/res/####.html"
+                + ".*/@/(res|thread)/####.html"
                 + " Got: " + u);
     }

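Note: the single pattern now handles both /res/ and /thread/ layouts and no longer requires "chan" or "anon-ib" in the hostname; the thread id is always group(2). A quick check against URLs from the tests below (assumes java.util.regex.Pattern is imported, as it is in this file):

    Pattern p = Pattern.compile("^.*\\.[a-z]{1,3}/[a-zA-Z0-9]+/(thread|res)/([0-9]+)(\\.html|\\.php)?.*$");
    p.matcher("http://boards.4chan.org/hr/thread/2214511").matches(); // group(2) = "2214511"
    p.matcher("http://desuchan.net/v/res/7034.html").matches();      // group(2) = "7034"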
@@ -83,36 +120,47 @@ public class ChanRipper extends AbstractHTMLRipper {
     public List<String> getURLsFromPage(Document page) {
         List<String> imageURLs = new ArrayList<String>();
         Pattern p; Matcher m;
+        elementloop:
         for (Element link : page.select("a")) {
             if (!link.hasAttr("href")) {
                 continue;
             }
-            if (!link.attr("href").contains("/src/")
-                    && !link.attr("href").contains("4cdn.org")) {
-                logger.debug("Skipping link that does not contain /src/: " + link.attr("href"));
-                continue;
-            }
-            if (link.attr("href").contains("=http")
-                    || link.attr("href").contains("http://imgops.com/")) {
-                logger.debug("Skipping link that contains '=http' or 'imgops.com': " + link.attr("href"));
-                continue;
-            }
-            p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|webm)$", Pattern.CASE_INSENSITIVE);
-            m = p.matcher(link.attr("href"));
-            if (m.matches()) {
-                String image = link.attr("href");
-                if (image.startsWith("//")) {
-                    image = "http:" + image;
-                }
-                if (image.startsWith("/")) {
-                    image = "http://" + this.url.getHost() + image;
-                }
-                // Don't download the same URL twice
-                if (imageURLs.contains(image)) {
-                    logger.debug("Already attempted: " + image);
-                    continue;
-                }
-                imageURLs.add(image);
+            String href = link.attr("href");
+
+            //Check all blacklist items
+            for (String blacklist_item : url_piece_blacklist) {
+                if (href.contains(blacklist_item)) {
+                    logger.debug("Skipping link that contains '" + blacklist_item + "': " + href);
+                    continue elementloop;
+                }
+            }
+            Boolean self_hosted = false;
+            if (!generalChanSite) {
+                for (String cdnDomain : chanSite.cdnDomains) {
+                    if (href.contains(cdnDomain)) {
+                        self_hosted = true;
+                    }
+                }
+            }
+            if (self_hosted || generalChanSite) {
+                p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
+                m = p.matcher(href);
+                if (m.matches()) {
+                    if (href.startsWith("//")) {
+                        href = "http:" + href;
+                    }
+                    if (href.startsWith("/")) {
+                        href = "http://" + this.url.getHost() + href;
+                    }
+                    // Don't download the same URL twice
+                    if (imageURLs.contains(href)) {
+                        logger.debug("Already attempted: " + href);
+                        continue;
+                    }
+                    imageURLs.add(href);
+                }
+            } else {
+                //TODO also grab imgur/flickr albums (And all other supported rippers) Maybe add a setting?
             }
         }
         return imageURLs;
@@ -122,5 +170,4 @@ public class ChanRipper extends AbstractHTMLRipper {
     public void downloadURL(URL url, int index) {
         addURLToDownload(url, getPrefix(index));
     }
-
 }
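Note: the `elementloop:` label lets the blacklist check inside the inner for-loop skip the whole anchor element rather than just the inner iteration. A minimal standalone sketch of the same Java construct (`links`, `blacklist`, and `process` are illustrative):

    outer:
    for (String link : links) {
        for (String bad : blacklist) {
            if (link.contains(bad)) {
                continue outer; // abandon this link entirely
            }
        }
        process(link);
    }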
FineboxRipper.java (renamed from VineboxRipper.java)
@@ -13,31 +13,34 @@ import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ripper.AlbumRipper;
 import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Http;
+import org.jsoup.select.Elements;

-public class VineboxRipper extends AlbumRipper {
+public class FineboxRipper extends AlbumRipper {

-    private static final String DOMAIN = "vinebox.co",
-                                HOST = "vinebox";
+    private static final String DOMAIN = "finebox.co",
+                                DOMAIN_OLD = "vinebox.co",
+                                HOST = "finebox";

-    public VineboxRipper(URL url) throws IOException {
+    public FineboxRipper(URL url) throws IOException {
         super(url);
     }

     @Override
     public boolean canRip(URL url) {
-        return url.getHost().endsWith(DOMAIN);
+        return url.getHost().endsWith(DOMAIN) || url.getHost().endsWith(DOMAIN_OLD);
     }

     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
-        return new URL("http://vinebox.co/u/" + getGID(url));
+        return new URL("http://" + DOMAIN + "/u/" + getGID(url));
     }

     @Override
     public void rip() throws IOException {
         int page = 0;
         Document doc;
-        while (true) {
+        Boolean hasPagesLeft = true;
+        while (hasPagesLeft) {
             page++;
             String urlPaged = this.url.toExternalForm() + "?page=" + page;
             logger.info("Retrieving " + urlPaged);
@@ -48,8 +51,16 @@ public class FineboxRipper extends AlbumRipper {
                 logger.debug("Hit end of pages at page " + page, e);
                 break;
             }
-            for (Element element : doc.select("video")) {
-                addURLToDownload(new URL(element.attr("src")));
+            Elements videos = doc.select("video");
+            for (Element element : videos) {
+                String videourl = element.attr("src");
+                if (!videourl.startsWith("http")) {
+                    videourl = "http://" + DOMAIN + videourl;
+                }
+                if (!addURLToDownload(new URL(videourl))) {
+                    hasPagesLeft = false;
+                    break;
+                }
             }
             try {
                 Thread.sleep(1000);
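Note: this loop is the first consumer of the new boolean return: finebox pages repeat indefinitely, so the rip stops once a video URL is refused as a duplicate. Relative src values are rebased onto the domain first; a quick sketch of that normalization with an illustrative src attribute:

    String videourl = "/media/clip.mp4";            // hypothetical src value
    if (!videourl.startsWith("http")) {
        videourl = "http://finebox.co" + videourl;  // -> http://finebox.co/media/clip.mp4
    }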
@@ -68,10 +79,10 @@ public class FineboxRipper extends AlbumRipper {

     @Override
     public String getGID(URL url) throws MalformedURLException {
-        Pattern p = Pattern.compile("^https?://(www\\.)?vinebox\\.co/u/([a-zA-Z0-9]{1,}).*$");
+        Pattern p = Pattern.compile("^https?://(www\\.)?(v|f)inebox\\.co/u/([a-zA-Z0-9]{1,}).*$");
         Matcher m = p.matcher(url.toExternalForm());
         if (!m.matches()) {
-            throw new MalformedURLException("Expected format: http://vinebox.co/u/USERNAME");
+            throw new MalformedURLException("Expected format: http://" + DOMAIN + "/u/USERNAME");
         }
         return m.group(m.groupCount());
     }
ChanSite.java (new file)
@@ -0,0 +1,25 @@
+package com.rarchives.ripme.ripper.rippers.ripperhelpers;
+
+import java.util.List;
+
+public class ChanSite {
+    //The domains where the threads are hosted.
+    public List<String> domains;
+    //The domains where the images are hosted.
+    public List<String> cdnDomains;
+
+    public ChanSite(List<String> Domains, List<String> CdnDomains) {
+        if (Domains.isEmpty())
+            throw new IllegalArgumentException("Domains");
+        if (CdnDomains.isEmpty())
+            throw new IllegalArgumentException("CdnDomains");
+        domains = Domains;
+        cdnDomains = CdnDomains;
+    }
+    public ChanSite(List<String> Domains) {
+        if (Domains.isEmpty())
+            throw new IllegalArgumentException("Domains");
+        domains = Domains;
+        cdnDomains = Domains;
+    }
+}
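Note: the one-argument constructor covers chans that serve media from the same host as the boards, which is what the ChanRipper fallback relies on. A small usage sketch:

    // Boards and media on separate hosts:
    ChanSite fourChan = new ChanSite(Arrays.asList("boards.4chan.org"), Arrays.asList("4cdn.org"));
    // Self-hosted media; cdnDomains defaults to the board domains:
    ChanSite generic = new ChanSite(Arrays.asList("desuchan.net"));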
MainWindow.java
@@ -448,7 +448,7 @@ public class MainWindow implements Runnable, RipStatusHandler {
                     AbstractRipper ripper = AbstractRipper.getRipper(url);
                     statusWithColor(ripper.getHost() + " album detected", Color.GREEN);
                 } catch (Exception e) {
-                    statusWithColor("Can't rip this URL", Color.RED);
+                    statusWithColor("Can't rip this URL: " + e.getMessage(), Color.RED);
                 }
             }
         });
ChanRipperTest.java
@@ -27,18 +27,20 @@ public class ChanRipperTest extends RippersTest {
         List<URL> passURLs = new ArrayList<URL>();
         // URLs that should work
         passURLs.add(new URL("http://desuchan.net/v/res/7034.html"));
-        passURLs.add(new URL("http://boards.4chan.org/r/res/12225949"));
+        passURLs.add(new URL("http://boards.4chan.org/hr/thread/2214511"));
+        passURLs.add(new URL("http://fgts.jp/r/thread/12225949/"));
         passURLs.add(new URL("http://boards.420chan.org/ana/res/75984.php"));
         passURLs.add(new URL("http://7chan.org/gif/res/23795.html"));
         passURLs.add(new URL("http://unichan2.org/b/res/518004.html"));
         passURLs.add(new URL("http://xchan.pw/porn/res/437.html"));
+        passURLs.add(new URL("http://archive.moe/c/thread/2295132/"));
         for (URL url : passURLs) {
             try {
                 ChanRipper ripper = new ChanRipper(url);
                 assert(ripper.canRip(url));
                 deleteDir(ripper.getWorkingDir());
             } catch (Exception e) {
-                fail("Failed to instantiate ripper for " + url);
+                fail("Failed to instantiate ripper for " + url + " with message: " + e.toString());
             }
         }
     }
@@ -55,6 +57,7 @@ public class ChanRipperTest extends RippersTest {
         contentURLs.add(new URL("http://7chan.org/gif/res/23795.html"));
         contentURLs.add(new URL("http://unichan2.org/b/res/518004.html"));
         contentURLs.add(new URL("http://xchan.pw/porn/res/437.html"));
+        contentURLs.add(new URL("http://archive.4plebs.org/hr/thread/2215899/"));
         for (URL url : contentURLs) {
             try {
                 ChanRipper ripper = new ChanRipper(url);
FineboxRipperTest.java (renamed from VineboxRipperTest.java)
@@ -5,9 +5,9 @@ import java.net.URL;
 import java.util.ArrayList;
 import java.util.List;

-import com.rarchives.ripme.ripper.rippers.VineboxRipper;
+import com.rarchives.ripme.ripper.rippers.FineboxRipper;

-public class VineboxRipperTest extends RippersTest {
+public class FineboxRipperTest extends RippersTest {

     public void testVineboxAlbums() throws IOException {
         if (DOWNLOAD_CONTENT) {
@@ -15,9 +15,10 @@ public class FineboxRipperTest extends RippersTest {
         }
         List<URL> contentURLs = new ArrayList<URL>();
         contentURLs.add(new URL("http://vinebox.co/u/wi57hMjc2Ka"));
+        contentURLs.add(new URL("http://finebox.co/u/wi57hMjc2Ka"));
         for (URL url : contentURLs) {
             try {
-                VineboxRipper ripper = new VineboxRipper(url);
+                FineboxRipper ripper = new FineboxRipper(url);
                 ripper.rip();
                 assert(ripper.getWorkingDir().listFiles().length > 1);
                 deleteDir(ripper.getWorkingDir());