Merge pull request #116 from EraYaN/master

Made ChanRipper more universal.
This commit is contained in:
4_pr0n 2014-10-21 01:14:11 -07:00
commit f82c1f56ef
10 changed files with 192 additions and 82 deletions

1
build.bat Normal file
View File

@ -0,0 +1 @@
:: Build a single runnable jar that bundles all dependencies (Maven assembly plugin).
mvn clean compile assembly:single

17
nbactions.xml Normal file
View File

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- NetBeans IDE action mapping: makes the "Run" action launch the app
     (com.rarchives.ripme.App) through the exec-maven-plugin after the
     process-classes phase. -->
<actions>
<action>
<actionName>run</actionName>
<packagings>
<packaging>jar</packaging>
</packagings>
<goals>
<goal>process-classes</goal>
<goal>org.codehaus.mojo:exec-maven-plugin:1.2.1:exec</goal>
</goals>
<properties>
<exec.args>-classpath %classpath com.rarchives.ripme.App</exec.args>
<exec.executable>java</exec.executable>
</properties>
</action>
</actions>

View File

@ -94,14 +94,14 @@ public abstract class AbstractRipper
* @param saveAs
* Path of the local file to save the content to.
*/
public abstract void addURLToDownload(URL url, File saveAs);
public abstract void addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies);
public abstract boolean addURLToDownload(URL url, File saveAs);
public abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies);
public void addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies) {
public boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies) {
try {
stopCheck();
} catch (IOException e) {
return;
return false;
}
String saveAs = url.toExternalForm();
saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
@ -122,14 +122,14 @@ public abstract class AbstractRipper
+ saveAs);
} catch (IOException e) {
logger.error("[!] Error creating save file path for URL '" + url + "':", e);
return;
return false;
}
logger.debug("Downloading " + url + " to " + saveFileAs);
if (!saveFileAs.getParentFile().exists()) {
logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
saveFileAs.getParentFile().mkdirs();
}
addURLToDownload(url, saveFileAs, referrer, cookies);
return addURLToDownload(url, saveFileAs, referrer, cookies);
}
/**
@ -141,8 +141,8 @@ public abstract class AbstractRipper
* @param subdirectory
* Sub-directory of the working directory to save the images to.
*/
public void addURLToDownload(URL url, String prefix, String subdirectory) {
addURLToDownload(url, prefix, subdirectory, null, null);
public boolean addURLToDownload(URL url, String prefix, String subdirectory) {
return addURLToDownload(url, prefix, subdirectory, null, null);
}
/**
@ -153,9 +153,9 @@ public abstract class AbstractRipper
* @param prefix
* Text to append to saved filename.
*/
public void addURLToDownload(URL url, String prefix) {
public boolean addURLToDownload(URL url, String prefix) {
// Use empty subdirectory
addURLToDownload(url, prefix, "");
return addURLToDownload(url, prefix, "");
}
/**
* Waits for downloading threads to complete.

View File

@ -33,14 +33,14 @@ public abstract class AlbumRipper extends AbstractRipper {
return false;
}
public void addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies) {
if (!allowDuplicates()
&& ( itemsPending.containsKey(url)
|| itemsCompleted.containsKey(url)
|| itemsErrored.containsKey(url) )) {
// Item is already downloaded/downloading, skip it.
logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
return;
return false;
}
if (Utils.getConfigBoolean("urls_only.save", false)) {
// Output URL to file
@ -68,11 +68,12 @@ public abstract class AlbumRipper extends AbstractRipper {
}
threadPool.addThread(dft);
}
return true;
}
@Override
public void addURLToDownload(URL url, File saveAs) {
addURLToDownload(url, saveAs, null, null);
public boolean addURLToDownload(URL url, File saveAs) {
return addURLToDownload(url, saveAs, null, null);
}
/**
@ -80,10 +81,12 @@ public abstract class AlbumRipper extends AbstractRipper {
* Uses filename from URL to decide filename.
* @param url
* URL to download
* @return
* True on success
*/
public void addURLToDownload(URL url) {
public boolean addURLToDownload(URL url) {
// Use empty prefix and empty subdirectory
addURLToDownload(url, "", "");
return addURLToDownload(url, "", "");
}
@Override
@ -146,6 +149,8 @@ public abstract class AlbumRipper extends AbstractRipper {
* Sets directory to save all ripped files to.
* @param url
* URL to define how the working directory should be saved.
* @throws
* IOException
*/
@Override
public void setWorkingDir(URL url) throws IOException {

View File

@ -12,12 +12,48 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
import com.rarchives.ripme.utils.Http;
import java.util.Arrays;
public class ChanRipper extends AbstractHTMLRipper {
//ArrayList<String> explicit_domains = new ArrayList<String>();
public static List<ChanSite> explicit_domains = Arrays.asList(
//Tested (main boards)
//Untested (main boards)
new ChanSite(Arrays.asList("anon-ib.com")),
new ChanSite(Arrays.asList("boards.4chan.org"),Arrays.asList("4cdn.org")),
//Tested (archives)
new ChanSite(Arrays.asList("archive.moe"),Arrays.asList("data.archive.moe")), //4chan archive (successor of foolz archive) Archives: [ a / biz / c / co / diy / gd / i / int / jp / m / mlp / out / po / q / s4s / sci / sp / tg / tv / v / vg / vp / vr / wsg ]
//Untested (archives)new ChanSite(Arrays.asList("anon-ib.com")),
new ChanSite(Arrays.asList("4archive.org"),Arrays.asList("imgur.com")), //4chan archive (on demand)
new ChanSite(Arrays.asList("archive.4plebs.org"),Arrays.asList("img.4plebs.org")), //4chan archive Archives: [ adv / f / hr / o / pol / s4s / tg / trv / tv / x ] Boards: [ plebs ]
new ChanSite(Arrays.asList("fgts.jp"),Arrays.asList("dat.fgts.jp")) //4chan archive Archives: [ asp / cm / h / hc / hm / n / p / r / s / soc / y ]
);
public static List<String> url_piece_blacklist = Arrays.asList(
"=http",
"http://imgops.com/",
"iqdb.org",
"saucenao.com"
);
public ChanSite chanSite;
public Boolean generalChanSite = true;
/**
 * Creates a ripper for the given thread URL. If the host matches one of the
 * explicitly supported chans, that site's definition (thread + CDN domains)
 * is used; otherwise a generic single-domain definition is built from the
 * URL's actual host.
 *
 * @param url thread URL to rip
 * @throws IOException propagated from the superclass constructor
 */
public ChanRipper(URL url) throws IOException {
    super(url);
    // Look for an explicit site definition whose thread-domain list
    // contains this URL's host.
    for (ChanSite _chanSite : explicit_domains) {
        for (String host : _chanSite.domains) {
            if (url.getHost().equals(host)) {
                chanSite = _chanSite;
                generalChanSite = false;
            }
        }
    }
    if (chanSite == null) {
        // Fix: the original passed the string literal "url.getHost()"
        // instead of the host itself, so the generic fallback never
        // matched anything. Use the actual host.
        chanSite = new ChanSite(Arrays.asList(url.getHost()));
    }
}
@Override
@ -34,38 +70,39 @@ public class ChanRipper extends AbstractHTMLRipper {
@Override
public boolean canRip(URL url) {
// TODO Whitelist?
if (url.getHost().equals("anon-ib.com")) {
//explicit_domains testing
for (ChanSite _chanSite : explicit_domains) {
for (String host : _chanSite.domains) {
if (url.getHost().equals(host)) {
return true;
}
return url.getHost().contains("chan") &&
( url.toExternalForm().contains("/res/") // Most chans
|| url.toExternalForm().contains("/thread/")); // 4chan
}
}
//It'll fail further down the road.
return url.toExternalForm().contains("/res/") // Most chans
|| url.toExternalForm().contains("/thread/"); // 4chan, archive.moe
}
/**
* For example the archives are all known. (Check 4chan-x)
* Should be based on the software the specific chan uses.
* FoolFuuka uses the same (url) layout as 4chan
* */
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p; Matcher m;
String u = url.toExternalForm();
if (u.contains("/res/")) {
p = Pattern.compile("^.*(chan|anon-ib).*\\.[a-z]{2,3}/[a-zA-Z0-9/]+/res/([0-9]+)(\\.html|\\.php)?.*$");
if (u.contains("/thread/")||u.contains("/res/")) {
p = Pattern.compile("^.*\\.[a-z]{1,3}/[a-zA-Z0-9]+/(thread|res)/([0-9]+)(\\.html|\\.php)?.*$");
m = p.matcher(u);
if (m.matches()) {
return m.group(2);
}
}
else if (u.contains("/thread/")) {
p = Pattern.compile("^.*chan.*\\.[a-z]{2,3}/[a-zA-Z0-9]+/thread/([0-9]+)(\\.html|\\.php)?.*$");
m = p.matcher(u);
if (m.matches()) {
return m.group(1);
}
}
throw new MalformedURLException(
"Expected *chan URL formats: "
+ "*chan.com/@/res/####.html"
+ ".*/@/(res|thread)/####.html"
+ " Got: " + u);
}
@ -83,36 +120,47 @@ public class ChanRipper extends AbstractHTMLRipper {
public List<String> getURLsFromPage(Document page) {
List<String> imageURLs = new ArrayList<String>();
Pattern p; Matcher m;
elementloop:
for (Element link : page.select("a")) {
if (!link.hasAttr("href")) {
continue;
}
if (!link.attr("href").contains("/src/")
&& !link.attr("href").contains("4cdn.org")) {
logger.debug("Skipping link that does not contain /src/: " + link.attr("href"));
continue;
String href = link.attr("href");
//Check all blacklist items
for(String blacklist_item : url_piece_blacklist){
if (href.contains(blacklist_item)){
logger.debug("Skipping link that contains '"+blacklist_item+"': " + href);
continue elementloop;
}
if (link.attr("href").contains("=http")
|| link.attr("href").contains("http://imgops.com/")) {
logger.debug("Skipping link that contains '=http' or 'imgops.com': " + link.attr("href"));
continue;
}
p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|webm)$", Pattern.CASE_INSENSITIVE);
m = p.matcher(link.attr("href"));
Boolean self_hosted = false;
if(!generalChanSite){
for(String cdnDomain : chanSite.cdnDomains){
if (href.contains(cdnDomain)){
self_hosted = true;
}
}
}
if(self_hosted||generalChanSite){
p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
m = p.matcher(href);
if (m.matches()) {
String image = link.attr("href");
if (image.startsWith("//")) {
image = "http:" + image;
if (href.startsWith("//")) {
href = "http:" + href;
}
if (image.startsWith("/")) {
image = "http://" + this.url.getHost() + image;
if (href.startsWith("/")) {
href = "http://" + this.url.getHost() + href;
}
// Don't download the same URL twice
if (imageURLs.contains(image)) {
logger.debug("Already attempted: " + image);
if (imageURLs.contains(href)) {
logger.debug("Already attempted: " + href);
continue;
}
imageURLs.add(image);
imageURLs.add(href);
}
} else {
//TODO also grab imgur/flickr albums (And all other supported rippers) Maybe add a setting?
}
}
return imageURLs;
@ -122,5 +170,4 @@ public class ChanRipper extends AbstractHTMLRipper {
/**
 * Queues a single image URL for download.
 *
 * @param url image URL to download
 * @param index position of the image within the thread; used to derive a
 *        filename prefix (via getPrefix) so saved files keep thread order
 */
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}

View File

@ -13,31 +13,34 @@ import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AlbumRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
import org.jsoup.select.Elements;
public class VineboxRipper extends AlbumRipper {
public class FineboxRipper extends AlbumRipper {
private static final String DOMAIN = "vinebox.co",
HOST = "vinebox";
private static final String DOMAIN = "finebox.co",
DOMAIN_OLD = "vinebox.co",
HOST = "finebox";
public VineboxRipper(URL url) throws IOException {
public FineboxRipper(URL url) throws IOException {
super(url);
}
@Override
public boolean canRip(URL url) {
return url.getHost().endsWith(DOMAIN);
return url.getHost().endsWith(DOMAIN) || url.getHost().endsWith(DOMAIN_OLD);
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return new URL("http://vinebox.co/u/" + getGID(url));
return new URL("http://"+DOMAIN+"/u/" + getGID(url));
}
@Override
public void rip() throws IOException {
int page = 0;
Document doc;
while (true) {
Boolean hasPagesLeft = true;
while (hasPagesLeft) {
page++;
String urlPaged = this.url.toExternalForm() + "?page=" + page;
logger.info("Retrieving " + urlPaged);
@ -48,8 +51,16 @@ public class VineboxRipper extends AlbumRipper {
logger.debug("Hit end of pages at page " + page, e);
break;
}
for (Element element : doc.select("video")) {
addURLToDownload(new URL(element.attr("src")));
Elements videos = doc.select("video");
for (Element element : videos) {
String videourl = element.attr("src");
if(videourl.substring(0,4)!="http"){
videourl = "http://"+DOMAIN+ videourl;
}
if(!addURLToDownload(new URL(videourl))){
hasPagesLeft = false;
break;
}
}
try {
Thread.sleep(1000);
@ -68,10 +79,10 @@ public class VineboxRipper extends AlbumRipper {
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://(www\\.)?vinebox\\.co/u/([a-zA-Z0-9]{1,}).*$");
Pattern p = Pattern.compile("^https?://(www\\.)?(v|f)inebox\\.co/u/([a-zA-Z0-9]{1,}).*$");
Matcher m = p.matcher(url.toExternalForm());
if (!m.matches()) {
throw new MalformedURLException("Expected format: http://vinebox.co/u/USERNAME");
throw new MalformedURLException("Expected format: http://"+DOMAIN+"/u/USERNAME");
}
return m.group(m.groupCount());
}

View File

@ -0,0 +1,25 @@
package com.rarchives.ripme.ripper.rippers.ripperhelpers;
import java.util.List;
/**
 * Describes a chan-style imageboard: the domains that serve its thread
 * pages and the domains that serve its images (CDN).
 */
public class ChanSite {
    // The domains where the threads are hosted.
    public List<String> domains;
    // The domains where the images are hosted.
    public List<String> cdnDomains;

    /**
     * @param domains thread-hosting domains; must be non-empty
     * @param cdnDomains image-hosting (CDN) domains; must be non-empty
     * @throws IllegalArgumentException if either list is empty
     */
    public ChanSite(List<String> domains, List<String> cdnDomains) {
        if (domains.isEmpty()) {
            throw new IllegalArgumentException("Expected at least one thread domain");
        }
        if (cdnDomains.isEmpty()) {
            throw new IllegalArgumentException("Expected at least one CDN domain");
        }
        this.domains = domains;
        this.cdnDomains = cdnDomains;
    }

    /**
     * Convenience constructor for sites that serve images from the same
     * domains as their threads.
     *
     * @param domains thread-hosting domains, also used as CDN domains;
     *        must be non-empty
     * @throws IllegalArgumentException if the list is empty
     */
    public ChanSite(List<String> domains) {
        this(domains, domains); // chain instead of duplicating validation
    }
}

View File

@ -448,7 +448,7 @@ public class MainWindow implements Runnable, RipStatusHandler {
AbstractRipper ripper = AbstractRipper.getRipper(url);
statusWithColor(ripper.getHost() + " album detected", Color.GREEN);
} catch (Exception e) {
statusWithColor("Can't rip this URL", Color.RED);
statusWithColor("Can't rip this URL: "+e.getMessage(), Color.RED);
}
}
});

View File

@ -27,18 +27,20 @@ public class ChanRipperTest extends RippersTest {
List<URL> passURLs = new ArrayList<URL>();
// URLs that should work
passURLs.add(new URL("http://desuchan.net/v/res/7034.html"));
passURLs.add(new URL("http://boards.4chan.org/r/res/12225949"));
passURLs.add(new URL("http://boards.4chan.org/hr/thread/2214511"));
passURLs.add(new URL("http://fgts.jp/r/thread/12225949/"));
passURLs.add(new URL("http://boards.420chan.org/ana/res/75984.php"));
passURLs.add(new URL("http://7chan.org/gif/res/23795.html"));
passURLs.add(new URL("http://unichan2.org/b/res/518004.html"));
passURLs.add(new URL("http://xchan.pw/porn/res/437.html"));
passURLs.add(new URL("http://archive.moe/c/thread/2295132/"));
for (URL url : passURLs) {
try {
ChanRipper ripper = new ChanRipper(url);
assert(ripper.canRip(url));
deleteDir(ripper.getWorkingDir());
} catch (Exception e) {
fail("Failed to instantiate ripper for " + url);
fail("Failed to instantiate ripper for " + url + " with message: "+e.toString());
}
}
}
@ -55,6 +57,7 @@ public class ChanRipperTest extends RippersTest {
contentURLs.add(new URL("http://7chan.org/gif/res/23795.html"));
contentURLs.add(new URL("http://unichan2.org/b/res/518004.html"));
contentURLs.add(new URL("http://xchan.pw/porn/res/437.html"));
contentURLs.add(new URL("http://archive.4plebs.org/hr/thread/2215899/"));
for (URL url : contentURLs) {
try {
ChanRipper ripper = new ChanRipper(url);

View File

@ -5,9 +5,9 @@ import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import com.rarchives.ripme.ripper.rippers.VineboxRipper;
import com.rarchives.ripme.ripper.rippers.FineboxRipper;
public class VineboxRipperTest extends RippersTest {
public class FineboxRipperTest extends RippersTest {
public void testVineboxAlbums() throws IOException {
if (DOWNLOAD_CONTENT) {
@ -15,9 +15,10 @@ public class VineboxRipperTest extends RippersTest {
}
List<URL> contentURLs = new ArrayList<URL>();
contentURLs.add(new URL("http://vinebox.co/u/wi57hMjc2Ka"));
contentURLs.add(new URL("http://finebox.co/u/wi57hMjc2Ka"));
for (URL url : contentURLs) {
try {
VineboxRipper ripper = new VineboxRipper(url);
FineboxRipper ripper = new FineboxRipper(url);
ripper.rip();
assert(ripper.getWorkingDir().listFiles().length > 1);
deleteDir(ripper.getWorkingDir());