diff --git a/build.bat b/build.bat
new file mode 100644
index 00000000..7e7c3221
--- /dev/null
+++ b/build.bat
@@ -0,0 +1 @@
+mvn clean compile assembly:single
\ No newline at end of file
diff --git a/nbactions.xml b/nbactions.xml
new file mode 100644
index 00000000..fa676bd9
--- /dev/null
+++ b/nbactions.xml
@@ -0,0 +1,17 @@
+
+
+
+ run
+
+ jar
+
+
+ process-classes
+ org.codehaus.mojo:exec-maven-plugin:1.2.1:exec
+
+
+ -classpath %classpath com.rarchives.ripme.App
+ java
+
+
+
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
index f674e5cc..e1ea4a23 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
@@ -94,14 +94,14 @@ public abstract class AbstractRipper
* @param saveAs
* Path of the local file to save the content to.
*/
- public abstract void addURLToDownload(URL url, File saveAs);
- public abstract void addURLToDownload(URL url, File saveAs, String referrer, Map cookies);
+ public abstract boolean addURLToDownload(URL url, File saveAs);
+ public abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies);
- public void addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies) {
+ public boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies) {
try {
stopCheck();
} catch (IOException e) {
- return;
+ return false;
}
String saveAs = url.toExternalForm();
saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
@@ -122,14 +122,14 @@ public abstract class AbstractRipper
+ saveAs);
} catch (IOException e) {
logger.error("[!] Error creating save file path for URL '" + url + "':", e);
- return;
+ return false;
}
logger.debug("Downloading " + url + " to " + saveFileAs);
if (!saveFileAs.getParentFile().exists()) {
logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
saveFileAs.getParentFile().mkdirs();
}
- addURLToDownload(url, saveFileAs, referrer, cookies);
+ return addURLToDownload(url, saveFileAs, referrer, cookies);
}
/**
@@ -141,8 +141,8 @@ public abstract class AbstractRipper
* @param subdirectory
* Sub-directory of the working directory to save the images to.
*/
- public void addURLToDownload(URL url, String prefix, String subdirectory) {
- addURLToDownload(url, prefix, subdirectory, null, null);
+ public boolean addURLToDownload(URL url, String prefix, String subdirectory) {
+ return addURLToDownload(url, prefix, subdirectory, null, null);
}
/**
@@ -153,9 +153,9 @@ public abstract class AbstractRipper
* @param prefix
* Text to append to saved filename.
*/
- public void addURLToDownload(URL url, String prefix) {
+ public boolean addURLToDownload(URL url, String prefix) {
// Use empty subdirectory
- addURLToDownload(url, prefix, "");
+ return addURLToDownload(url, prefix, "");
}
/**
* Waits for downloading threads to complete.
diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
index 09f117af..b4135b22 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
@@ -33,14 +33,14 @@ public abstract class AlbumRipper extends AbstractRipper {
return false;
}
- public void addURLToDownload(URL url, File saveAs, String referrer, Map cookies) {
+ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies) {
if (!allowDuplicates()
&& ( itemsPending.containsKey(url)
|| itemsCompleted.containsKey(url)
|| itemsErrored.containsKey(url) )) {
// Item is already downloaded/downloading, skip it.
logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
- return;
+ return false;
}
if (Utils.getConfigBoolean("urls_only.save", false)) {
// Output URL to file
@@ -68,11 +68,12 @@ public abstract class AlbumRipper extends AbstractRipper {
}
threadPool.addThread(dft);
}
+ return true;
}
@Override
- public void addURLToDownload(URL url, File saveAs) {
- addURLToDownload(url, saveAs, null, null);
+ public boolean addURLToDownload(URL url, File saveAs) {
+ return addURLToDownload(url, saveAs, null, null);
}
/**
@@ -80,10 +81,12 @@ public abstract class AlbumRipper extends AbstractRipper {
* Uses filename from URL to decide filename.
* @param url
* URL to download
+ * @return
+ * True on success
*/
- public void addURLToDownload(URL url) {
+ public boolean addURLToDownload(URL url) {
// Use empty prefix and empty subdirectory
- addURLToDownload(url, "", "");
+ return addURLToDownload(url, "", "");
}
@Override
@@ -146,6 +149,8 @@ public abstract class AlbumRipper extends AbstractRipper {
* Sets directory to save all ripped files to.
* @param url
* URL to define how the working directory should be saved.
+ * @throws
+ * IOException
*/
@Override
public void setWorkingDir(URL url) throws IOException {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
index 6f16d8c4..c76e1b7f 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
@@ -12,12 +12,48 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
import com.rarchives.ripme.utils.Http;
+import java.util.Arrays;
public class ChanRipper extends AbstractHTMLRipper {
-
+
+ //ArrayList explicit_domains = new ArrayList();
+ public static List explicit_domains = Arrays.asList(
+ //Tested (main boards)
+ //Untested (main boards)
+ new ChanSite(Arrays.asList("anon-ib.com")),
+ new ChanSite(Arrays.asList("boards.4chan.org"),Arrays.asList("4cdn.org")),
+ //Tested (archives)
+ new ChanSite(Arrays.asList("archive.moe"),Arrays.asList("data.archive.moe")), //4chan archive (successor of foolz archive) Archives: [ a / biz / c / co / diy / gd / i / int / jp / m / mlp / out / po / q / s4s / sci / sp / tg / tv / v / vg / vp / vr / wsg ]
+ //Untested (archives)new ChanSite(Arrays.asList("anon-ib.com")),
+ new ChanSite(Arrays.asList("4archive.org"),Arrays.asList("imgur.com")), //4chan archive (on demand)
+ new ChanSite(Arrays.asList("archive.4plebs.org"),Arrays.asList("img.4plebs.org")), //4chan archive Archives: [ adv / f / hr / o / pol / s4s / tg / trv / tv / x ] Boards: [ plebs ]
+ new ChanSite(Arrays.asList("fgts.jp"),Arrays.asList("dat.fgts.jp")) //4chan archive Archives: [ asp / cm / h / hc / hm / n / p / r / s / soc / y ]
+ );
+ public static List url_piece_blacklist = Arrays.asList(
+ "=http",
+ "http://imgops.com/",
+ "iqdb.org",
+ "saucenao.com"
+ );
+
+ public ChanSite chanSite;
+    public boolean generalChanSite = true;
+
public ChanRipper(URL url) throws IOException {
super(url);
+ for (ChanSite _chanSite : explicit_domains) {
+ for (String host : _chanSite.domains) {
+ if (url.getHost().equals(host)) {
+ chanSite = _chanSite;
+ generalChanSite = false;
+ }
+ }
+ }
+ if(chanSite==null){
+            chanSite = new ChanSite(Arrays.asList(url.getHost()));
+ }
}
@Override
@@ -33,39 +69,40 @@ public class ChanRipper extends AbstractHTMLRipper {
}
@Override
- public boolean canRip(URL url) {
- // TODO Whitelist?
- if (url.getHost().equals("anon-ib.com")) {
- return true;
+ public boolean canRip(URL url) {
+ //explicit_domains testing
+ for (ChanSite _chanSite : explicit_domains) {
+ for (String host : _chanSite.domains) {
+ if (url.getHost().equals(host)) {
+ return true;
+ }
+ }
}
- return url.getHost().contains("chan") &&
- ( url.toExternalForm().contains("/res/") // Most chans
- || url.toExternalForm().contains("/thread/")); // 4chan
+ //It'll fail further down the road.
+ return url.toExternalForm().contains("/res/") // Most chans
+ || url.toExternalForm().contains("/thread/"); // 4chan, archive.moe
}
-
+ /**
+ * For example the achrives are all known. (Check 4chan-x)
+ * Should be based on the software the specific chan uses.
+ * FoolFuuka uses the same (url) layout as 4chan
+ * */
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p; Matcher m;
- String u = url.toExternalForm();
- if (u.contains("/res/")) {
- p = Pattern.compile("^.*(chan|anon-ib).*\\.[a-z]{2,3}/[a-zA-Z0-9/]+/res/([0-9]+)(\\.html|\\.php)?.*$");
+ String u = url.toExternalForm();
+ if (u.contains("/thread/")||u.contains("/res/")) {
+ p = Pattern.compile("^.*\\.[a-z]{1,3}/[a-zA-Z0-9]+/(thread|res)/([0-9]+)(\\.html|\\.php)?.*$");
m = p.matcher(u);
if (m.matches()) {
return m.group(2);
}
}
- else if (u.contains("/thread/")) {
- p = Pattern.compile("^.*chan.*\\.[a-z]{2,3}/[a-zA-Z0-9]+/thread/([0-9]+)(\\.html|\\.php)?.*$");
- m = p.matcher(u);
- if (m.matches()) {
- return m.group(1);
- }
- }
throw new MalformedURLException(
"Expected *chan URL formats: "
- + "*chan.com/@/res/####.html"
+ + ".*/@/(res|thread)/####.html"
+ " Got: " + u);
}
@@ -83,37 +120,48 @@ public class ChanRipper extends AbstractHTMLRipper {
public List getURLsFromPage(Document page) {
List imageURLs = new ArrayList();
Pattern p; Matcher m;
+ elementloop:
for (Element link : page.select("a")) {
if (!link.hasAttr("href")) {
continue;
}
- if (!link.attr("href").contains("/src/")
- && !link.attr("href").contains("4cdn.org")) {
- logger.debug("Skipping link that does not contain /src/: " + link.attr("href"));
- continue;
+ String href = link.attr("href");
+
+ //Check all blacklist items
+ for(String blacklist_item : url_piece_blacklist){
+ if (href.contains(blacklist_item)){
+ logger.debug("Skipping link that contains '"+blacklist_item+"': " + href);
+ continue elementloop;
+ }
}
- if (link.attr("href").contains("=http")
- || link.attr("href").contains("http://imgops.com/")) {
- logger.debug("Skipping link that contains '=http' or 'imgops.com': " + link.attr("href"));
- continue;
+            boolean self_hosted = false;
+ if(!generalChanSite){
+ for(String cdnDomain : chanSite.cdnDomains){
+ if (href.contains(cdnDomain)){
+ self_hosted = true;
+ }
+ }
}
- p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|webm)$", Pattern.CASE_INSENSITIVE);
- m = p.matcher(link.attr("href"));
- if (m.matches()) {
- String image = link.attr("href");
- if (image.startsWith("//")) {
- image = "http:" + image;
+ if(self_hosted||generalChanSite){
+ p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
+ m = p.matcher(href);
+ if (m.matches()) {
+ if (href.startsWith("//")) {
+ href = "http:" + href;
+ }
+ if (href.startsWith("/")) {
+ href = "http://" + this.url.getHost() + href;
+ }
+ // Don't download the same URL twice
+ if (imageURLs.contains(href)) {
+ logger.debug("Already attempted: " + href);
+ continue;
+ }
+ imageURLs.add(href);
}
- if (image.startsWith("/")) {
- image = "http://" + this.url.getHost() + image;
- }
- // Don't download the same URL twice
- if (imageURLs.contains(image)) {
- logger.debug("Already attempted: " + image);
- continue;
- }
- imageURLs.add(image);
- }
+ } else {
+ //TODO also grab imgur/flickr albums (And all other supported rippers) Maybe add a setting?
+ }
}
return imageURLs;
}
@@ -121,6 +169,5 @@ public class ChanRipper extends AbstractHTMLRipper {
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
- }
-
-}
\ No newline at end of file
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VineboxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FineboxRipper.java
similarity index 60%
rename from src/main/java/com/rarchives/ripme/ripper/rippers/VineboxRipper.java
rename to src/main/java/com/rarchives/ripme/ripper/rippers/FineboxRipper.java
index e04cac14..dc1393f6 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/VineboxRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FineboxRipper.java
@@ -13,31 +13,34 @@ import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AlbumRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
+import org.jsoup.select.Elements;
-public class VineboxRipper extends AlbumRipper {
+public class FineboxRipper extends AlbumRipper {
- private static final String DOMAIN = "vinebox.co",
- HOST = "vinebox";
+ private static final String DOMAIN = "finebox.co",
+ DOMAIN_OLD = "vinebox.co",
+ HOST = "finebox";
- public VineboxRipper(URL url) throws IOException {
+ public FineboxRipper(URL url) throws IOException {
super(url);
}
@Override
public boolean canRip(URL url) {
- return url.getHost().endsWith(DOMAIN);
+ return url.getHost().endsWith(DOMAIN) || url.getHost().endsWith(DOMAIN_OLD);
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
- return new URL("http://vinebox.co/u/" + getGID(url));
+ return new URL("http://"+DOMAIN+"/u/" + getGID(url));
}
@Override
public void rip() throws IOException {
int page = 0;
Document doc;
- while (true) {
+        boolean hasPagesLeft = true;
+ while (hasPagesLeft) {
page++;
String urlPaged = this.url.toExternalForm() + "?page=" + page;
logger.info("Retrieving " + urlPaged);
@@ -47,10 +50,18 @@ public class VineboxRipper extends AlbumRipper {
} catch (HttpStatusException e) {
logger.debug("Hit end of pages at page " + page, e);
break;
- }
- for (Element element : doc.select("video")) {
- addURLToDownload(new URL(element.attr("src")));
- }
+ }
+ Elements videos = doc.select("video");
+ for (Element element : videos) {
+ String videourl = element.attr("src");
+            if(!videourl.startsWith("http")){
+ videourl = "http://"+DOMAIN+ videourl;
+ }
+ if(!addURLToDownload(new URL(videourl))){
+ hasPagesLeft = false;
+ break;
+ }
+ }
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
@@ -68,10 +79,10 @@ public class VineboxRipper extends AlbumRipper {
@Override
public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("^https?://(www\\.)?vinebox\\.co/u/([a-zA-Z0-9]{1,}).*$");
+ Pattern p = Pattern.compile("^https?://(www\\.)?(v|f)inebox\\.co/u/([a-zA-Z0-9]{1,}).*$");
Matcher m = p.matcher(url.toExternalForm());
if (!m.matches()) {
- throw new MalformedURLException("Expected format: http://vinebox.co/u/USERNAME");
+ throw new MalformedURLException("Expected format: http://"+DOMAIN+"/u/USERNAME");
}
return m.group(m.groupCount());
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ripperhelpers/ChanSite.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ripperhelpers/ChanSite.java
new file mode 100644
index 00000000..c8ec3d4f
--- /dev/null
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ripperhelpers/ChanSite.java
@@ -0,0 +1,25 @@
+package com.rarchives.ripme.ripper.rippers.ripperhelpers;
+
+import java.util.List;
+
+public class ChanSite {
+ //The domains where the threads are hosted.
+ public List domains;
+ //The domains where the images are hosted.
+ public List cdnDomains;
+
+ public ChanSite(List Domains, List CdnDomains){
+ if(Domains.isEmpty())
+ throw new IllegalArgumentException("Domains");
+ if(CdnDomains.isEmpty())
+ throw new IllegalArgumentException("CdnDomains");
+ domains = Domains;
+ cdnDomains = CdnDomains;
+ }
+ public ChanSite(List Domains){
+ if(Domains.isEmpty())
+ throw new IllegalArgumentException("Domains");
+ domains = Domains;
+ cdnDomains = Domains;
+ }
+}
diff --git a/src/main/java/com/rarchives/ripme/ui/MainWindow.java b/src/main/java/com/rarchives/ripme/ui/MainWindow.java
index 6a962e3a..0eebffce 100644
--- a/src/main/java/com/rarchives/ripme/ui/MainWindow.java
+++ b/src/main/java/com/rarchives/ripme/ui/MainWindow.java
@@ -448,7 +448,7 @@ public class MainWindow implements Runnable, RipStatusHandler {
AbstractRipper ripper = AbstractRipper.getRipper(url);
statusWithColor(ripper.getHost() + " album detected", Color.GREEN);
} catch (Exception e) {
- statusWithColor("Can't rip this URL", Color.RED);
+ statusWithColor("Can't rip this URL: "+e.getMessage(), Color.RED);
}
}
});
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java
index 6f6a77c4..b1f48107 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java
@@ -27,18 +27,20 @@ public class ChanRipperTest extends RippersTest {
List passURLs = new ArrayList();
// URLs that should work
passURLs.add(new URL("http://desuchan.net/v/res/7034.html"));
- passURLs.add(new URL("http://boards.4chan.org/r/res/12225949"));
+ passURLs.add(new URL("http://boards.4chan.org/hr/thread/2214511"));
+ passURLs.add(new URL("http://fgts.jp/r/thread/12225949/"));
passURLs.add(new URL("http://boards.420chan.org/ana/res/75984.php"));
passURLs.add(new URL("http://7chan.org/gif/res/23795.html"));
passURLs.add(new URL("http://unichan2.org/b/res/518004.html"));
passURLs.add(new URL("http://xchan.pw/porn/res/437.html"));
+ passURLs.add(new URL("http://archive.moe/c/thread/2295132/"));
for (URL url : passURLs) {
try {
ChanRipper ripper = new ChanRipper(url);
assert(ripper.canRip(url));
deleteDir(ripper.getWorkingDir());
} catch (Exception e) {
- fail("Failed to instantiate ripper for " + url);
+ fail("Failed to instantiate ripper for " + url + " with message: "+e.toString());
}
}
}
@@ -55,6 +57,7 @@ public class ChanRipperTest extends RippersTest {
contentURLs.add(new URL("http://7chan.org/gif/res/23795.html"));
contentURLs.add(new URL("http://unichan2.org/b/res/518004.html"));
contentURLs.add(new URL("http://xchan.pw/porn/res/437.html"));
+ contentURLs.add(new URL("http://archive.4plebs.org/hr/thread/2215899/"));
for (URL url : contentURLs) {
try {
ChanRipper ripper = new ChanRipper(url);
diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineboxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FineboxRipperTest.java
similarity index 76%
rename from src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineboxRipperTest.java
rename to src/test/java/com/rarchives/ripme/tst/ripper/rippers/FineboxRipperTest.java
index 734d4cbd..92cc4f24 100644
--- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VineboxRipperTest.java
+++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FineboxRipperTest.java
@@ -5,9 +5,9 @@ import java.net.URL;
import java.util.ArrayList;
import java.util.List;
-import com.rarchives.ripme.ripper.rippers.VineboxRipper;
+import com.rarchives.ripme.ripper.rippers.FineboxRipper;
-public class VineboxRipperTest extends RippersTest {
+public class FineboxRipperTest extends RippersTest {
public void testVineboxAlbums() throws IOException {
if (DOWNLOAD_CONTENT) {
@@ -15,9 +15,10 @@ public class VineboxRipperTest extends RippersTest {
}
List contentURLs = new ArrayList();
contentURLs.add(new URL("http://vinebox.co/u/wi57hMjc2Ka"));
+ contentURLs.add(new URL("http://finebox.co/u/wi57hMjc2Ka"));
for (URL url : contentURLs) {
try {
- VineboxRipper ripper = new VineboxRipper(url);
+ FineboxRipper ripper = new FineboxRipper(url);
ripper.rip();
assert(ripper.getWorkingDir().listFiles().length > 1);
deleteDir(ripper.getWorkingDir());