Merge branch 'formatting': Various code cleanup and formatting.

MetaPrime 2017-05-09 17:51:19 -07:00
commit dc2c100d9d
52 changed files with 745 additions and 788 deletions

View File

@@ -146,7 +146,7 @@ public class App {
         try {
             String url;
             BufferedReader br = new BufferedReader(new FileReader(filename));
-            while((url = br.readLine()) != null) {
+            while ((url = br.readLine()) != null) {
                 // loop through each url in the file and proces each url individually.
                 ripURL(url.trim(), cl.hasOption("n"));
             }
@@ -171,7 +171,7 @@ public class App {
         if (!history.contains(url.toExternalForm())) {
             history.add(url.toExternalForm());
             Utils.setConfigList("download.history", Arrays.asList(history.toArray()));
-            if(saveConfig) {
+            if (saveConfig) {
                 Utils.saveConfig();
             }
         }

View File

@@ -158,13 +158,13 @@ public class ChanRipper extends AbstractHTMLRipper {
         Boolean self_hosted = false;
         if (!generalChanSite) {
             for (String cdnDomain : chanSite.cdnDomains) {
-                if (href.contains(cdnDomain)){
+                if (href.contains(cdnDomain)) {
                     self_hosted = true;
                 }
             }
         }
-        if (self_hosted || generalChanSite){
+        if (self_hosted || generalChanSite) {
             p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
             m = p.matcher(href);
             if (m.matches()) {
@@ -194,7 +194,7 @@ public class ChanRipper extends AbstractHTMLRipper {
             }
             List<URL> urls = RipUtils.getFilesFromURL(originalURL);
-            for(URL imageurl : urls){
+            for (URL imageurl : urls) {
                 imageURLs.add(imageurl.toString());
             }
         }

View File

@@ -1,4 +1,3 @@
 package com.rarchives.ripme.ripper.rippers;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -8,24 +7,16 @@ import com.rarchives.ripme.utils.Utils;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.MalformedURLException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
-/**
- *
- * @author
- */
 public class E621Ripper extends AbstractHTMLRipper {
     public static final int POOL_IMAGES_PER_PAGE = 24;
@@ -177,5 +168,4 @@ public class E621Ripper extends AbstractHTMLRipper {
         return Utils.filesystemSafe(prefix + getTerm(url));
     }
 }

View File

@@ -9,7 +9,6 @@ import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -18,10 +17,8 @@ import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
-import org.jsoup.Connection.Method;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Http;
 /**
@@ -45,7 +42,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
     }
     @Override
-    public void downloadURL(URL url, int index){
+    public void downloadURL(URL url, int index) {
         addURLToDownload(url);
     }
@@ -66,12 +63,12 @@ public class EroShareRipper extends AbstractHTMLRipper {
     @Override
-    public List<String> getURLsFromPage(Document doc){
+    public List<String> getURLsFromPage(Document doc) {
         List<String> URLs = new ArrayList<String>();
         //Pictures
         Elements imgs = doc.getElementsByTag("img");
-        for (Element img : imgs){
-            if (img.hasClass("album-image")){
+        for (Element img : imgs) {
+            if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
                 imageURL = "https:" + imageURL;
                 URLs.add(imageURL);
@@ -79,8 +76,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         }
         //Videos
         Elements vids = doc.getElementsByTag("video");
-        for (Element vid : vids){
-            if (vid.hasClass("album-video")){
+        for (Element vid : vids) {
+            if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
                 URLs.add(videoURL);
@@ -122,8 +119,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         List<URL> URLs = new ArrayList<URL>();
         //Pictures
         Elements imgs = doc.getElementsByTag("img");
-        for (Element img : imgs){
-            if (img.hasClass("album-image")){
+        for (Element img : imgs) {
+            if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
                 imageURL = "https:" + imageURL;
                 URLs.add(new URL(imageURL));
@@ -131,8 +128,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         }
         //Videos
         Elements vids = doc.getElementsByTag("video");
-        for (Element vid : vids){
-            if (vid.hasClass("album-video")){
+        for (Element vid : vids) {
+            if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
                 URLs.add(new URL(videoURL));

View File

@@ -59,8 +59,8 @@ public class FapprovedRipper extends AbstractHTMLRipper {
     @Override
     public Document getNextPage(Document doc) throws IOException {
-        if ( (doc.select("div.pagination li.next.disabled").size() != 0)
-            || (doc.select("div.pagination").size() == 0) ) {
+        if ((doc.select("div.pagination li.next.disabled").size() != 0)
+            || (doc.select("div.pagination").size() == 0)) {
             throw new IOException("No more pages found");
         }
         sleep(1000);

View File

@@ -11,11 +11,8 @@ import java.util.regex.Pattern;
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
-import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ripper.AbstractJSONRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Http;
 public class InstagramRipper extends AbstractJSONRipper {
@@ -65,7 +62,7 @@ public class InstagramRipper extends AbstractJSONRipper {
         Pattern p = Pattern.compile("^https?://instagram\\.com/([^/]+)");
         Matcher m = p.matcher(url.toExternalForm());
-        if(m.matches()) {
+        if (m.matches()) {
             return m.group(1);
         }
@@ -95,7 +92,7 @@ public class InstagramRipper extends AbstractJSONRipper {
             throw new IOException("No additional pages found");
         }
-        if(nextPageAvailable) {
+        if (nextPageAvailable) {
             JSONArray items = json.getJSONArray("items");
             JSONObject last_item = items.getJSONObject(items.length() - 1);
             String nextMaxID = last_item.getString("id");

View File

@@ -2,24 +2,15 @@ package com.rarchives.ripme.ripper.rippers;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
-import com.rarchives.ripme.utils.Utils;
-import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.URL;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.logging.Level;
-import java.util.logging.Logger;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
-import org.jsoup.select.Elements;
 public class MyhentaicomicsRipper extends AbstractHTMLRipper {
     public static boolean isTag;
@@ -95,13 +86,13 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
         List<String> albumPagesList = new ArrayList<String>();
         int pageNumber = 1;
         albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
-        while(true) {
+        while (true) {
             String urlToGet = "http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber);
             Document nextAlbumPage;
             try {
                 logger.info("Grabbing " + urlToGet);
                 nextAlbumPage = Http.url(urlToGet).get();
-            } catch(IOException e){
+            } catch(IOException e) {
                 logger.warn("Failed to log link in Jsoup");
                 nextAlbumPage = null;
                 e.printStackTrace();
@@ -109,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
             Element elem = nextAlbumPage.select("a.ui-icon-right").first();
             String nextPage = elem.attr("href");
             pageNumber = pageNumber + 1;
-            if(nextPage == ""){
+            if (nextPage == "") {
                 logger.info("Got " + pageNumber + " pages");
                 break;
             }
@@ -145,7 +136,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
             else {
                 album_doc = Http.url(element).get();
             }
-        } catch(IOException e){
+        } catch(IOException e) {
             logger.warn("Failed to log link in Jsoup");
             album_doc = null;
             e.printStackTrace();

View File

@@ -4,7 +4,6 @@ import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -13,7 +12,6 @@ import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
 import com.rarchives.ripme.utils.Http;
 public class NatalieMuRipper extends AbstractHTMLRipper {

View File

@@ -21,18 +21,14 @@ import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
-/**
- *
- * @author
- */
-public class PahealRipper extends AbstractHTMLRipper{
-    private static Map<String,String> cookies=null;
-    private static Pattern gidPattern=null;
+public class PahealRipper extends AbstractHTMLRipper {
+    private static Map<String, String> cookies = null;
+    private static Pattern gidPattern = null;
     private static Map<String, String> getCookies() {
-        if(cookies==null){
-            cookies=new HashMap<String, String>(1);
-            cookies.put("ui-tnc-agreed","true");
+        if (cookies == null) {
+            cookies = new HashMap<String, String>(1);
+            cookies.put("ui-tnc-agreed", "true");
         }
         return cookies;
     }
@@ -53,26 +49,28 @@ public class PahealRipper extends AbstractHTMLRipper{
     @Override
     public Document getFirstPage() throws IOException {
-        return Http.url("http://rule34.paheal.net/post/list/"+getTerm(url)+"/1").cookies(getCookies()).get();
+        return Http.url("http://rule34.paheal.net/post/list/" + getTerm(url) + "/1").cookies(getCookies()).get();
     }
     @Override
     public Document getNextPage(Document page) throws IOException {
-        for(Element e:page.select("#paginator a")){
-            if(e.text().toLowerCase().equals("next"))
+        for (Element e : page.select("#paginator a")) {
+            if (e.text().toLowerCase().equals("next")) {
                 return Http.url(e.absUrl("href")).cookies(getCookies()).get();
+            }
         }
         return null;
     }
     @Override
     public List<String> getURLsFromPage(Document page) {
-        Elements elements=page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
-        List<String> res=new ArrayList<String>(elements.size());
-        for(Element e:elements)
+        Elements elements = page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
+        List<String> res = new ArrayList<String>(elements.size());
+        for (Element e : elements) {
             res.add(e.absUrl("href"));
+        }
         return res;
     }
@@ -80,16 +78,20 @@ public class PahealRipper extends AbstractHTMLRipper{
     @Override
     public void downloadURL(URL url, int index) {
         try {
-            String name=url.getPath();
-            String ext=".png";
-            name=name.substring(name.lastIndexOf('/')+1);
-            if(name.indexOf('.')>=0){
-                ext=name.substring(name.lastIndexOf('.'));
-                name=name.substring(0,name.length()-ext.length());
+            String name = url.getPath();
+            String ext = ".png";
+            name = name.substring(name.lastIndexOf('/') + 1);
+            if (name.indexOf('.') >= 0) {
+                ext = name.substring(name.lastIndexOf('.'));
+                name = name.substring(0, name.length() - ext.length());
             }
-            addURLToDownload(url,new File(workingDir.getCanonicalPath()+File.separator+Utils.filesystemSafe(new URI(name).getPath())+ext));
+            File outFile = new File(workingDir.getCanonicalPath()
+                    + File.separator
+                    + Utils.filesystemSafe(new URI(name).getPath())
+                    + ext);
+            addURLToDownload(url, outFile);
         } catch (IOException ex) {
             Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
         } catch (URISyntaxException ex) {
@@ -97,15 +99,17 @@ public class PahealRipper extends AbstractHTMLRipper{
         }
     }
-    private String getTerm(URL url) throws MalformedURLException{
-        if(gidPattern==null)
-            gidPattern=Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
+    private String getTerm(URL url) throws MalformedURLException {
+        if (gidPattern == null) {
+            gidPattern = Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
+        }
         Matcher m = gidPattern.matcher(url.toExternalForm());
-        if(m.matches())
+        if (m.matches()) {
             return m.group(2);
+        }
-        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
     }
@@ -116,7 +120,6 @@ public class PahealRipper extends AbstractHTMLRipper{
             Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
         }
-        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
     }
 }

View File

@@ -132,7 +132,7 @@ public class RedditRipper extends AlbumRipper {
         JSONArray jsonArray = new JSONArray();
         if (jsonObj instanceof JSONObject) {
             jsonArray.put( (JSONObject) jsonObj);
-        } else if (jsonObj instanceof JSONArray){
+        } else if (jsonObj instanceof JSONArray) {
             jsonArray = (JSONArray) jsonObj;
         } else {
             logger.warn("[!] Unable to parse JSON: " + jsonString);

View File

@@ -194,9 +194,9 @@ public class TumblrRipper extends AlbumRipper {
         try {
             fileURL = new URL(photo.getJSONObject("original_size").getString("url"));
             m = p.matcher(fileURL.toString());
-            if(m.matches()) {
+            if (m.matches()) {
                 addURLToDownload(fileURL);
-            } else{
+            } else {
                 URL redirectedURL = Http.url(fileURL).ignoreContentType().response().url();
                 addURLToDownload(redirectedURL);
             }

View File

@@ -11,19 +11,18 @@ import java.util.regex.Pattern;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
-import org.jsoup.select.Elements;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
 public class WordpressComicRipper extends AbstractHTMLRipper {
     public WordpressComicRipper(URL url) throws IOException {
         super(url);
     }
     public static List<String> explicit_domains = Arrays.asList("www.totempole666.com",
         "buttsmithy.com", "themonsterunderthebed.net", "prismblush.com");
     @Override
     public String getHost() {
         String host = url.toExternalForm().split("/")[2];
@@ -63,8 +62,8 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             if (prismblushMat.matches()) {
                 return true;
             }
         }
         return false;
     }
@@ -97,7 +96,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
         return super.getAlbumTitle(url);
     }
     @Override
     public String getGID(URL url) throws MalformedURLException {
         String url_name = url.toExternalForm();
@@ -129,10 +127,10 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             }
             nextPage = elem.attr("href");
         }
         if (nextPage == "") {
             throw new IOException("No more pages");
-        }
-        else {
+        } else {
             return Http.url(nextPage).get();
         }
     }
@@ -160,6 +158,4 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
     public void downloadURL(URL url, int index) {
         addURLToDownload(url, getPrefix(index));
     }
 }

View File

@@ -1,4 +1,3 @@
 package com.rarchives.ripme.ripper.rippers;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -18,12 +17,8 @@ import java.util.regex.Pattern;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
-/**
- *
- * @author
- */
-public class XbooruRipper extends AbstractHTMLRipper{
-    private static Pattern gidPattern=null;
+public class XbooruRipper extends AbstractHTMLRipper {
+    private static Pattern gidPattern = null;
     public XbooruRipper(URL url) throws IOException {
         super(url);
@@ -39,8 +34,8 @@ public class XbooruRipper extends AbstractHTMLRipper{
         return "xbooru";
     }
-    private String getPage(int num) throws MalformedURLException{
-        return "http://xbooru.com/index.php?page=dapi&s=post&q=index&pid="+num+"&tags="+getTerm(url);
+    private String getPage(int num) throws MalformedURLException {
+        return "http://xbooru.com/index.php?page=dapi&s=post&q=index&pid=" + num + "&tags=" + getTerm(url);
     }
     @Override
@@ -50,37 +45,41 @@ public class XbooruRipper extends AbstractHTMLRipper{
     @Override
     public Document getNextPage(Document doc) throws IOException {
-        int offset=Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
-        int num=Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));
-        if(offset+100>num)
+        int offset = Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
+        int num = Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));
+        if (offset + 100 > num) {
             return null;
+        }
-        return Http.url(getPage(offset/100+1)).get();
+        return Http.url(getPage(offset / 100 + 1)).get();
     }
     @Override
     public List<String> getURLsFromPage(Document page) {
-        List<String> res=new ArrayList<String>(100);
-        for(Element e:page.getElementsByTag("post"))
-            res.add(e.absUrl("file_url")+"#"+e.attr("id"));
+        List<String> res = new ArrayList<String>(100);
+        for (Element e : page.getElementsByTag("post")) {
+            res.add(e.absUrl("file_url") + "#" + e.attr("id"));
+        }
         return res;
     }
     @Override
     public void downloadURL(URL url, int index) {
-        addURLToDownload(url,Utils.getConfigBoolean("download.save_order",true)?url.getRef()+"-":"");
+        addURLToDownload(url, Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
     }
-    private String getTerm(URL url) throws MalformedURLException{
-        if(gidPattern==null)
-            gidPattern=Pattern.compile("^https?://(www\\.)?xbooru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(\\&|(#.*)?$)");
+    private String getTerm(URL url) throws MalformedURLException {
+        if (gidPattern == null) {
+            gidPattern = Pattern.compile("^https?://(www\\.)?xbooru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(\\&|(#.*)?$)");
+        }
         Matcher m = gidPattern.matcher(url.toExternalForm());
-        if(m.matches())
+        if (m.matches()) {
             return m.group(4);
+        }
-        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
     }
     @Override
@@ -91,7 +90,6 @@ public class XbooruRipper extends AbstractHTMLRipper{
             Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
         }
-        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
     }
 }

View File

@@ -4,7 +4,6 @@ import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -14,10 +13,8 @@ import java.util.regex.Pattern;
 import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
-import org.jsoup.select.Elements;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Http;
 public class ZizkiRipper extends AbstractHTMLRipper {

View File

@@ -3,22 +3,26 @@ package com.rarchives.ripme.ripper.rippers.ripperhelpers;
 import java.util.List;
 public class ChanSite {
-    //The domains where the threads are hosted.
+    // The domains where the threads are hosted.
     public List<String> domains;
-    //The domains where the images are hosted.
+    // The domains where the images are hosted.
     public List<String> cdnDomains;
-    public ChanSite(List<String> Domains, List<String> CdnDomains){
-        if(Domains.isEmpty())
+    public ChanSite(List<String> Domains, List<String> CdnDomains) {
+        if (Domains.isEmpty()) {
             throw new IllegalArgumentException("Domains");
-        if(CdnDomains.isEmpty())
+        }
+        if (CdnDomains.isEmpty()) {
             throw new IllegalArgumentException("CdnDomains");
+        }
         domains = Domains;
         cdnDomains = CdnDomains;
     }
-    public ChanSite(List<String> Domains){
-        if(Domains.isEmpty())
+    public ChanSite(List<String> Domains) {
+        if (Domains.isEmpty()) {
             throw new IllegalArgumentException("Domains");
+        }
         domains = Domains;
         cdnDomains = Domains;
     }
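Aside: the two constructors above only validate that the lists are non-empty and then store them, so a minimal usage sketch looks like the following (the domain strings are made-up placeholders, not values taken from this commit):

import java.util.Arrays;
import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;

public class ChanSiteExample {
    public static void main(String[] args) {
        // Hypothetical thread and CDN domains, for illustration only.
        ChanSite site = new ChanSite(
                Arrays.asList("boards.example-chan.org"),
                Arrays.asList("cdn.example-chan.org"));
        System.out.println(site.domains + " -> " + site.cdnDomains);

        // The single-argument constructor reuses the same list for both fields.
        ChanSite selfHosted = new ChanSite(Arrays.asList("example-chan.org"));
        System.out.println(selfHosted.cdnDomains); // prints [example-chan.org]
    }
}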

View File

@@ -3,7 +3,6 @@ package com.rarchives.ripme.utils;
 /**
  * Base64 encoder/decoder
  * From http://stackoverflow.com/a/4265472
- *
  */
 public class Base64 {
     private final static char[] ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".toCharArray();
@@ -11,8 +10,8 @@ public class Base64 {
     private static int[] toInt = new int[128];
     static {
-        for(int i=0; i< ALPHABET.length; i++){
-            toInt[ALPHABET[i]]= i;
+        for (int i = 0; i < ALPHABET.length; i++) {
+            toInt[ALPHABET[i]] = i;
         }
     }
@@ -22,12 +21,12 @@ public class Base64 {
      * @param buf the byte array (not null)
      * @return the translated Base64 string (not null)
      */
-    public static String encode(byte[] buf){
+    public static String encode(byte[] buf) {
         int size = buf.length;
         char[] ar = new char[((size + 2) / 3) * 4];
         int a = 0;
-        int i=0;
-        while(i < size){
+        int i = 0;
+        while (i < size) {
             byte b0 = buf[i++];
             byte b1 = (i < size) ? buf[i++] : 0;
             byte b2 = (i < size) ? buf[i++] : 0;
@@ -38,7 +37,7 @@ public class Base64 {
             ar[a++] = ALPHABET[((b1 << 2) | ((b2 & 0xFF) >> 6)) & mask];
             ar[a++] = ALPHABET[b2 & mask];
         }
-        switch(size % 3){
+        switch (size % 3) {
             case 1: ar[--a] = '=';
             case 2: ar[--a] = '=';
         }
@@ -51,25 +50,25 @@ public class Base64 {
      * @param s the Base64 string (not null)
      * @return the byte array (not null)
      */
-    public static byte[] decode(String s){
-        int delta = s.endsWith( "==" ) ? 2 : s.endsWith( "=" ) ? 1 : 0;
-        byte[] buffer = new byte[s.length()*3/4 - delta];
+    public static byte[] decode(String s) {
+        int delta = s.endsWith("==") ? 2 : s.endsWith("=") ? 1 : 0;
+        byte[] buffer = new byte[s.length() * 3 / 4 - delta];
         int mask = 0xFF;
         int index = 0;
-        for(int i=0; i< s.length(); i+=4){
-            int c0 = toInt[s.charAt( i )];
-            int c1 = toInt[s.charAt( i + 1)];
-            buffer[index++]= (byte)(((c0 << 2) | (c1 >> 4)) & mask);
-            if(index >= buffer.length){
+        for (int i = 0; i < s.length(); i += 4) {
+            int c0 = toInt[s.charAt(i)];
+            int c1 = toInt[s.charAt(i + 1)];
+            buffer[index++] = (byte) (((c0 << 2) | (c1 >> 4)) & mask);
+            if (index >= buffer.length) {
                 return buffer;
             }
-            int c2 = toInt[s.charAt( i + 2)];
-            buffer[index++]= (byte)(((c1 << 4) | (c2 >> 2)) & mask);
-            if(index >= buffer.length){
+            int c2 = toInt[s.charAt(i + 2)];
+            buffer[index++] = (byte) (((c1 << 4) | (c2 >> 2)) & mask);
+            if (index >= buffer.length) {
                 return buffer;
            }
-            int c3 = toInt[s.charAt( i + 3 )];
-            buffer[index++]= (byte)(((c2 << 6) | c3) & mask);
+            int c3 = toInt[s.charAt(i + 3)];
+            buffer[index++] = (byte) (((c2 << 6) | c3) & mask);
         }
         return buffer;
     }
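Aside: these hunks only normalize whitespace, so encode/decode behavior is unchanged; a minimal round-trip check against the signatures shown above (a hypothetical test class, not part of this commit) would be:

import java.nio.charset.StandardCharsets;
import com.rarchives.ripme.utils.Base64;

public class Base64RoundTrip {
    public static void main(String[] args) {
        byte[] original = "hello ripme".getBytes(StandardCharsets.UTF_8);
        // encode() pads with '=' so the output length is a multiple of 4.
        String encoded = Base64.encode(original);
        byte[] decoded = Base64.decode(encoded);
        System.out.println(encoded);                                       // aGVsbG8gcmlwbWU=
        System.out.println(new String(decoded, StandardCharsets.UTF_8));   // hello ripme
    }
}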

View File

@@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.rippers.VidbleRipper;
 import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
 import com.rarchives.ripme.ripper.rippers.EroShareRipper;
 public class RipUtils {
     private static final Logger logger = Logger.getLogger(RipUtils.class);
@@ -119,7 +118,7 @@ public class RipUtils {
         }
         if (url.getHost().equals("imgur.com") ||
-                url.getHost().equals("m.imgur.com")){
+                url.getHost().equals("m.imgur.com")) {
             try {
                 // Fetch the page
                 Document doc = Jsoup.connect(url.toExternalForm())
@@ -165,18 +164,6 @@ public class RipUtils {
         if (url == null) url = urlFromSiteDirectoryName(dir, "vinebox", "http://finebox.co/u/", "");
         if (url == null) url = urlFromSiteDirectoryName(dir, "imgbox", "http://imgbox.com/g/", "");
         if (url == null) url = urlFromSiteDirectoryName(dir, "modelmayhem", "http://www.modelmayhem.com/", "");
-        /*
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-        */
         //if (url == null) url = urlFromSiteDirectoryName(dir, "8muses", "http://www.8muses.com/index/category/", "");
         return url;
     }
@@ -248,9 +235,8 @@ public class RipUtils {
         List<String> fields = Arrays.asList(dir.split("_"));
         String album = fields.get(1);
         String url = "http://";
-        if ( (fields.contains("top") || fields.contains("new"))
-            && (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))
-            ) {
+        if ((fields.contains("top") || fields.contains("new"))
+            && (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))) {
             // Subreddit
             fields.remove(0); // "imgur"
             String sub = "";

View File

@@ -34,7 +34,6 @@ import com.rarchives.ripme.ripper.AbstractRipper;
  * Common utility functions used in various places throughout the project.
  */
 public class Utils {
     public static final String RIP_DIRECTORY = "rips";
     private static final String configFile = "rip.properties";
     private static final Logger logger = Logger.getLogger(Utils.class);
@@ -50,7 +49,7 @@ public class Utils {
         }
         config = new PropertiesConfiguration(configPath);
         logger.info("Loaded " + config.getPath());
-        if (f.exists()){
+        if (f.exists()) {
             // Config was loaded from file
             if ( !config.containsKey("twitter.auth")
                     || !config.containsKey("twitter.max_requests")
@@ -171,16 +170,18 @@ public class Utils {
     public static String stripURLParameter(String url, String parameter) {
         int paramIndex = url.indexOf("?" + parameter);
         boolean wasFirstParam = true;
-        if(paramIndex < 0) {
+        if (paramIndex < 0) {
             wasFirstParam = false;
             paramIndex = url.indexOf("&" + parameter);
         }
-        if(paramIndex > 0) {
+        if (paramIndex > 0) {
             int nextParam = url.indexOf("&", paramIndex+1);
-            if(nextParam != -1) {
+            if (nextParam != -1) {
                 String c = "&";
-                if(wasFirstParam) c = "?";
+                if (wasFirstParam) {
+                    c = "?";
+                }
                 url = url.substring(0, paramIndex) + c + url.substring(nextParam+1, url.length());
             } else {
                 url = url.substring(0, paramIndex);
@@ -250,10 +251,10 @@ public class Utils {
             jarPath = URLDecoder.decode(jarPath, "UTF-8");
             JarFile jarFile = new JarFile(jarPath);
             Enumeration<JarEntry> entries = jarFile.entries();
-            while(entries.hasMoreElements()) {
+            while (entries.hasMoreElements()) {
                 JarEntry nextElement = entries.nextElement();
                 String entryName = nextElement.getName();
-                if(entryName.startsWith(relPath)
+                if (entryName.startsWith(relPath)
                         && entryName.length() > (relPath.length() + "/".length())
                         && !nextElement.isDirectory()) {
                     String className = entryName.replace('/', '.').replace('\\', '.').replace(".class", "");
@@ -401,7 +402,7 @@ public class Utils {
     public static Map<String,String> parseUrlQuery(String query) {
         Map<String,String> res = new HashMap<String, String>();
-        if (query.equals("")){
+        if (query.equals("")) {
             return res;
         }
@@ -410,9 +411,9 @@ public class Utils {
         try {
             for (String part : parts) {
-                if ((pos = part.indexOf('=')) >= 0){
+                if ((pos = part.indexOf('=')) >= 0) {
                     res.put(URLDecoder.decode(part.substring(0, pos), "UTF-8"), URLDecoder.decode(part.substring(pos + 1), "UTF-8"));
-                }else{
+                } else {
                     res.put(URLDecoder.decode(part, "UTF-8"), "");
                 }
             }
@@ -434,7 +435,7 @@ public class Utils {
      * @return The associated value or null if key wasn't found
      */
     public static String parseUrlQuery(String query, String key) {
-        if (query.equals("")){
+        if (query.equals("")) {
             return null;
         }
@@ -444,7 +445,7 @@ public class Utils {
         try {
             for (String part : parts) {
                 if ((pos = part.indexOf('=')) >= 0) {
-                    if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)){
+                    if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)) {
                         return URLDecoder.decode(part.substring(pos + 1), "UTF-8");
                     }