Merge branch 'formatting': Various code cleanup and formatting.
commit dc2c100d9d
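The hunks below are formatting and cleanup changes only (spacing, brace style, unused imports). As a rough illustration of the convention being applied — a composite sketch, not a line taken verbatim from any single file in this commit — the style moves from:

    // before: no space after keywords, cramped operators, optional braces
    if(m.matches())
        return m.group(1);

to:

    // after: space after keywords and around operators, braces on every block
    if (m.matches()) {
        return m.group(1);
    }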
@@ -146,7 +146,7 @@ public class App {
try {
String url;
BufferedReader br = new BufferedReader(new FileReader(filename));
while((url = br.readLine()) != null) {
while ((url = br.readLine()) != null) {
// loop through each url in the file and proces each url individually.
ripURL(url.trim(), cl.hasOption("n"));
}
@@ -171,7 +171,7 @@ public class App {
if (!history.contains(url.toExternalForm())) {
history.add(url.toExternalForm());
Utils.setConfigList("download.history", Arrays.asList(history.toArray()));
if(saveConfig) {
if (saveConfig) {
Utils.saveConfig();
}
}
@@ -158,13 +158,13 @@ public class ChanRipper extends AbstractHTMLRipper {
Boolean self_hosted = false;
if (!generalChanSite) {
for (String cdnDomain : chanSite.cdnDomains) {
if (href.contains(cdnDomain)){
if (href.contains(cdnDomain)) {
self_hosted = true;
}
}
}

if (self_hosted || generalChanSite){
if (self_hosted || generalChanSite) {
p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
m = p.matcher(href);
if (m.matches()) {
@@ -194,7 +194,7 @@ public class ChanRipper extends AbstractHTMLRipper {
}

List<URL> urls = RipUtils.getFilesFromURL(originalURL);
for(URL imageurl : urls){
for (URL imageurl : urls) {
imageURLs.add(imageurl.toString());
}
}
@@ -1,4 +1,3 @@

package com.rarchives.ripme.ripper.rippers;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -8,24 +7,16 @@ import com.rarchives.ripme.utils.Utils;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
*
* @author
*/
public class E621Ripper extends AbstractHTMLRipper {
public static final int POOL_IMAGES_PER_PAGE = 24;

@@ -177,5 +168,4 @@ public class E621Ripper extends AbstractHTMLRipper {

return Utils.filesystemSafe(prefix + getTerm(url));
}

}
@@ -9,7 +9,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -18,10 +17,8 @@ import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.jsoup.Connection.Method;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;

/**
@@ -45,7 +42,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
}

@Override
public void downloadURL(URL url, int index){
public void downloadURL(URL url, int index) {
addURLToDownload(url);
}

@@ -66,12 +63,12 @@ public class EroShareRipper extends AbstractHTMLRipper {


@Override
public List<String> getURLsFromPage(Document doc){
public List<String> getURLsFromPage(Document doc) {
List<String> URLs = new ArrayList<String>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs){
if (img.hasClass("album-image")){
for (Element img : imgs) {
if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(imageURL);
@@ -79,8 +76,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
for (Element vid : vids){
if (vid.hasClass("album-video")){
for (Element vid : vids) {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(videoURL);
@@ -122,8 +119,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
List<URL> URLs = new ArrayList<URL>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs){
if (img.hasClass("album-image")){
for (Element img : imgs) {
if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(new URL(imageURL));
@@ -131,8 +128,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
for (Element vid : vids){
if (vid.hasClass("album-video")){
for (Element vid : vids) {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(new URL(videoURL));
@@ -59,8 +59,8 @@ public class FapprovedRipper extends AbstractHTMLRipper {

@Override
public Document getNextPage(Document doc) throws IOException {
if ( (doc.select("div.pagination li.next.disabled").size() != 0)
|| (doc.select("div.pagination").size() == 0) ) {
if ((doc.select("div.pagination li.next.disabled").size() != 0)
|| (doc.select("div.pagination").size() == 0)) {
throw new IOException("No more pages found");
}
sleep(1000);
@@ -11,11 +11,8 @@ import java.util.regex.Pattern;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;

public class InstagramRipper extends AbstractJSONRipper {
@@ -65,7 +62,7 @@ public class InstagramRipper extends AbstractJSONRipper {

Pattern p = Pattern.compile("^https?://instagram\\.com/([^/]+)");
Matcher m = p.matcher(url.toExternalForm());
if(m.matches()) {
if (m.matches()) {
return m.group(1);
}

@@ -95,7 +92,7 @@ public class InstagramRipper extends AbstractJSONRipper {
throw new IOException("No additional pages found");
}

if(nextPageAvailable) {
if (nextPageAvailable) {
JSONArray items = json.getJSONArray("items");
JSONObject last_item = items.getJSONObject(items.length() - 1);
String nextMaxID = last_item.getString("id");
@@ -2,24 +2,15 @@ package com.rarchives.ripme.ripper.rippers;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.Utils;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class MyhentaicomicsRipper extends AbstractHTMLRipper {
public static boolean isTag;
@@ -95,13 +86,13 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
List<String> albumPagesList = new ArrayList<String>();
int pageNumber = 1;
albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
while(true) {
while (true) {
String urlToGet = "http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber);
Document nextAlbumPage;
try {
logger.info("Grabbing " + urlToGet);
nextAlbumPage = Http.url(urlToGet).get();
} catch(IOException e){
} catch(IOException e) {
logger.warn("Failed to log link in Jsoup");
nextAlbumPage = null;
e.printStackTrace();
@@ -109,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Element elem = nextAlbumPage.select("a.ui-icon-right").first();
String nextPage = elem.attr("href");
pageNumber = pageNumber + 1;
if(nextPage == ""){
if (nextPage == "") {
logger.info("Got " + pageNumber + " pages");
break;
}
@@ -145,7 +136,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
else {
album_doc = Http.url(element).get();
}
} catch(IOException e){
} catch(IOException e) {
logger.warn("Failed to log link in Jsoup");
album_doc = null;
e.printStackTrace();
@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -13,7 +12,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
import com.rarchives.ripme.utils.Http;

public class NatalieMuRipper extends AbstractHTMLRipper {
@@ -21,18 +21,14 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
*
* @author
*/
public class PahealRipper extends AbstractHTMLRipper{
private static Map<String,String> cookies=null;
private static Pattern gidPattern=null;
public class PahealRipper extends AbstractHTMLRipper {
private static Map<String, String> cookies = null;
private static Pattern gidPattern = null;

private static Map<String, String> getCookies() {
if(cookies==null){
cookies=new HashMap<String, String>(1);
cookies.put("ui-tnc-agreed","true");
if (cookies == null) {
cookies = new HashMap<String, String>(1);
cookies.put("ui-tnc-agreed", "true");
}
return cookies;
}
@@ -53,26 +49,28 @@ public class PahealRipper extends AbstractHTMLRipper{

@Override
public Document getFirstPage() throws IOException {
return Http.url("http://rule34.paheal.net/post/list/"+getTerm(url)+"/1").cookies(getCookies()).get();
return Http.url("http://rule34.paheal.net/post/list/" + getTerm(url) + "/1").cookies(getCookies()).get();
}

@Override
public Document getNextPage(Document page) throws IOException {
for(Element e:page.select("#paginator a")){
if(e.text().toLowerCase().equals("next"))
for (Element e : page.select("#paginator a")) {
if (e.text().toLowerCase().equals("next")) {
return Http.url(e.absUrl("href")).cookies(getCookies()).get();
}
}

return null;
}

@Override
public List<String> getURLsFromPage(Document page) {
Elements elements=page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
List<String> res=new ArrayList<String>(elements.size());
Elements elements = page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
List<String> res = new ArrayList<String>(elements.size());

for(Element e:elements)
for (Element e : elements) {
res.add(e.absUrl("href"));
}

return res;
}
@@ -80,16 +78,20 @@ public class PahealRipper extends AbstractHTMLRipper{
@Override
public void downloadURL(URL url, int index) {
try {
String name=url.getPath();
String ext=".png";
String name = url.getPath();
String ext = ".png";

name=name.substring(name.lastIndexOf('/')+1);
if(name.indexOf('.')>=0){
ext=name.substring(name.lastIndexOf('.'));
name=name.substring(0,name.length()-ext.length());
name = name.substring(name.lastIndexOf('/') + 1);
if (name.indexOf('.') >= 0) {
ext = name.substring(name.lastIndexOf('.'));
name = name.substring(0, name.length() - ext.length());
}

addURLToDownload(url,new File(workingDir.getCanonicalPath()+File.separator+Utils.filesystemSafe(new URI(name).getPath())+ext));
File outFile = new File(workingDir.getCanonicalPath()
+ File.separator
+ Utils.filesystemSafe(new URI(name).getPath())
+ ext);
addURLToDownload(url, outFile);
} catch (IOException ex) {
Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
} catch (URISyntaxException ex) {
@@ -97,15 +99,17 @@ public class PahealRipper extends AbstractHTMLRipper{
}
}

private String getTerm(URL url) throws MalformedURLException{
if(gidPattern==null)
gidPattern=Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
private String getTerm(URL url) throws MalformedURLException {
if (gidPattern == null) {
gidPattern = Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
}

Matcher m = gidPattern.matcher(url.toExternalForm());
if(m.matches())
if (m.matches()) {
return m.group(2);
}

throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got "+url+" instead");
throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
}

@Override
@@ -116,7 +120,6 @@ public class PahealRipper extends AbstractHTMLRipper{
Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
}

throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got "+url+" instead");
throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
}

}
@@ -132,7 +132,7 @@ public class RedditRipper extends AlbumRipper {
JSONArray jsonArray = new JSONArray();
if (jsonObj instanceof JSONObject) {
jsonArray.put( (JSONObject) jsonObj);
} else if (jsonObj instanceof JSONArray){
} else if (jsonObj instanceof JSONArray) {
jsonArray = (JSONArray) jsonObj;
} else {
logger.warn("[!] Unable to parse JSON: " + jsonString);
@@ -194,9 +194,9 @@ public class TumblrRipper extends AlbumRipper {
try {
fileURL = new URL(photo.getJSONObject("original_size").getString("url"));
m = p.matcher(fileURL.toString());
if(m.matches()) {
if (m.matches()) {
addURLToDownload(fileURL);
} else{
} else {
URL redirectedURL = Http.url(fileURL).ignoreContentType().response().url();
addURLToDownload(redirectedURL);
}
@@ -11,19 +11,18 @@ import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class WordpressComicRipper extends AbstractHTMLRipper {

public WordpressComicRipper(URL url) throws IOException {
super(url);
}

public static List<String> explicit_domains = Arrays.asList("www.totempole666.com",
"buttsmithy.com", "themonsterunderthebed.net", "prismblush.com");

@Override
public String getHost() {
String host = url.toExternalForm().split("/")[2];
@@ -63,8 +62,8 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
if (prismblushMat.matches()) {
return true;
}

}

return false;
}

@@ -97,7 +96,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
return super.getAlbumTitle(url);
}


@Override
public String getGID(URL url) throws MalformedURLException {
String url_name = url.toExternalForm();
@@ -129,10 +127,10 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
}
nextPage = elem.attr("href");
}

if (nextPage == "") {
throw new IOException("No more pages");
}
else {
} else {
return Http.url(nextPage).get();
}
}
@@ -160,6 +158,4 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}


}
}
@@ -1,4 +1,3 @@

package com.rarchives.ripme.ripper.rippers;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -18,12 +17,8 @@ import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

/**
*
* @author
*/
public class XbooruRipper extends AbstractHTMLRipper{
private static Pattern gidPattern=null;
public class XbooruRipper extends AbstractHTMLRipper {
private static Pattern gidPattern = null;

public XbooruRipper(URL url) throws IOException {
super(url);
@@ -39,8 +34,8 @@ public class XbooruRipper extends AbstractHTMLRipper{
return "xbooru";
}

private String getPage(int num) throws MalformedURLException{
return "http://xbooru.com/index.php?page=dapi&s=post&q=index&pid="+num+"&tags="+getTerm(url);
private String getPage(int num) throws MalformedURLException {
return "http://xbooru.com/index.php?page=dapi&s=post&q=index&pid=" + num + "&tags=" + getTerm(url);
}

@Override
@@ -50,37 +45,41 @@ public class XbooruRipper extends AbstractHTMLRipper{

@Override
public Document getNextPage(Document doc) throws IOException {
int offset=Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
int num=Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));
int offset = Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
int num = Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));

if(offset+100>num)
if (offset + 100 > num) {
return null;
}

return Http.url(getPage(offset/100+1)).get();
return Http.url(getPage(offset / 100 + 1)).get();
}

@Override
public List<String> getURLsFromPage(Document page) {
List<String> res=new ArrayList<String>(100);
for(Element e:page.getElementsByTag("post"))
res.add(e.absUrl("file_url")+"#"+e.attr("id"));
List<String> res = new ArrayList<String>(100);
for (Element e : page.getElementsByTag("post")) {
res.add(e.absUrl("file_url") + "#" + e.attr("id"));
}
return res;
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url,Utils.getConfigBoolean("download.save_order",true)?url.getRef()+"-":"");
addURLToDownload(url, Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
}

private String getTerm(URL url) throws MalformedURLException{
if(gidPattern==null)
gidPattern=Pattern.compile("^https?://(www\\.)?xbooru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(\\&|(#.*)?$)");
private String getTerm(URL url) throws MalformedURLException {
if (gidPattern == null) {
gidPattern = Pattern.compile("^https?://(www\\.)?xbooru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(\\&|(#.*)?$)");
}

Matcher m = gidPattern.matcher(url.toExternalForm());
if(m.matches())
if (m.matches()) {
return m.group(4);
}

throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got "+url+" instead");
throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
}

@Override
@@ -91,7 +90,6 @@ public class XbooruRipper extends AbstractHTMLRipper{
Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
}

throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got "+url+" instead");
throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
}

}
@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -14,10 +13,8 @@ import java.util.regex.Pattern;
import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;

public class ZizkiRipper extends AbstractHTMLRipper {
@@ -3,22 +3,26 @@ package com.rarchives.ripme.ripper.rippers.ripperhelpers;
import java.util.List;

public class ChanSite {
//The domains where the threads are hosted.
// The domains where the threads are hosted.
public List<String> domains;
//The domains where the images are hosted.
// The domains where the images are hosted.
public List<String> cdnDomains;

public ChanSite(List<String> Domains, List<String> CdnDomains){
if(Domains.isEmpty())
public ChanSite(List<String> Domains, List<String> CdnDomains) {
if (Domains.isEmpty()) {
throw new IllegalArgumentException("Domains");
if(CdnDomains.isEmpty())
}
if (CdnDomains.isEmpty()) {
throw new IllegalArgumentException("CdnDomains");
}
domains = Domains;
cdnDomains = CdnDomains;
}
public ChanSite(List<String> Domains){
if(Domains.isEmpty())

public ChanSite(List<String> Domains) {
if (Domains.isEmpty()) {
throw new IllegalArgumentException("Domains");
}
domains = Domains;
cdnDomains = Domains;
}
@@ -3,7 +3,6 @@ package com.rarchives.ripme.utils;
/**
* Base64 encoder/decoder
* From http://stackoverflow.com/a/4265472
*
*/
public class Base64 {
private final static char[] ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".toCharArray();
@@ -11,8 +10,8 @@ public class Base64 {
private static int[] toInt = new int[128];

static {
for(int i=0; i< ALPHABET.length; i++){
toInt[ALPHABET[i]]= i;
for (int i = 0; i < ALPHABET.length; i++) {
toInt[ALPHABET[i]] = i;
}
}

@@ -22,12 +21,12 @@ public class Base64 {
* @param buf the byte array (not null)
* @return the translated Base64 string (not null)
*/
public static String encode(byte[] buf){
public static String encode(byte[] buf) {
int size = buf.length;
char[] ar = new char[((size + 2) / 3) * 4];
int a = 0;
int i=0;
while(i < size){
int i = 0;
while (i < size) {
byte b0 = buf[i++];
byte b1 = (i < size) ? buf[i++] : 0;
byte b2 = (i < size) ? buf[i++] : 0;
@@ -38,7 +37,7 @@ public class Base64 {
ar[a++] = ALPHABET[((b1 << 2) | ((b2 & 0xFF) >> 6)) & mask];
ar[a++] = ALPHABET[b2 & mask];
}
switch(size % 3){
switch (size % 3) {
case 1: ar[--a] = '=';
case 2: ar[--a] = '=';
}
@@ -51,25 +50,25 @@ public class Base64 {
* @param s the Base64 string (not null)
* @return the byte array (not null)
*/
public static byte[] decode(String s){
int delta = s.endsWith( "==" ) ? 2 : s.endsWith( "=" ) ? 1 : 0;
byte[] buffer = new byte[s.length()*3/4 - delta];
public static byte[] decode(String s) {
int delta = s.endsWith("==") ? 2 : s.endsWith("=") ? 1 : 0;
byte[] buffer = new byte[s.length() * 3 / 4 - delta];
int mask = 0xFF;
int index = 0;
for(int i=0; i< s.length(); i+=4){
int c0 = toInt[s.charAt( i )];
int c1 = toInt[s.charAt( i + 1)];
buffer[index++]= (byte)(((c0 << 2) | (c1 >> 4)) & mask);
if(index >= buffer.length){
for (int i = 0; i < s.length(); i += 4) {
int c0 = toInt[s.charAt(i)];
int c1 = toInt[s.charAt(i + 1)];
buffer[index++] = (byte) (((c0 << 2) | (c1 >> 4)) & mask);
if (index >= buffer.length) {
return buffer;
}
int c2 = toInt[s.charAt( i + 2)];
buffer[index++]= (byte)(((c1 << 4) | (c2 >> 2)) & mask);
if(index >= buffer.length){
int c2 = toInt[s.charAt(i + 2)];
buffer[index++] = (byte) (((c1 << 4) | (c2 >> 2)) & mask);
if (index >= buffer.length) {
return buffer;
}
int c3 = toInt[s.charAt( i + 3 )];
buffer[index++]= (byte)(((c2 << 6) | c3) & mask);
int c3 = toInt[s.charAt(i + 3)];
buffer[index++] = (byte) (((c2 << 6) | c3) & mask);
}
return buffer;
}
@@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.rippers.VidbleRipper;
import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
import com.rarchives.ripme.ripper.rippers.EroShareRipper;


public class RipUtils {
private static final Logger logger = Logger.getLogger(RipUtils.class);

@@ -119,7 +118,7 @@ public class RipUtils {
}

if (url.getHost().equals("imgur.com") ||
url.getHost().equals("m.imgur.com")){
url.getHost().equals("m.imgur.com")) {
try {
// Fetch the page
Document doc = Jsoup.connect(url.toExternalForm())
@@ -165,18 +164,6 @@ public class RipUtils {
if (url == null) url = urlFromSiteDirectoryName(dir, "vinebox", "http://finebox.co/u/", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "imgbox", "http://imgbox.com/g/", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "modelmayhem", "http://www.modelmayhem.com/", "");
/*
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
*/
//if (url == null) url = urlFromSiteDirectoryName(dir, "8muses", "http://www.8muses.com/index/category/", "");
return url;
}
@@ -248,9 +235,8 @@ public class RipUtils {
List<String> fields = Arrays.asList(dir.split("_"));
String album = fields.get(1);
String url = "http://";
if ( (fields.contains("top") || fields.contains("new"))
&& (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))
) {
if ((fields.contains("top") || fields.contains("new"))
&& (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))) {
// Subreddit
fields.remove(0); // "imgur"
String sub = "";
@@ -34,7 +34,6 @@ import com.rarchives.ripme.ripper.AbstractRipper;
* Common utility functions used in various places throughout the project.
*/
public class Utils {

public static final String RIP_DIRECTORY = "rips";
private static final String configFile = "rip.properties";
private static final Logger logger = Logger.getLogger(Utils.class);
@@ -50,7 +49,7 @@ public class Utils {
}
config = new PropertiesConfiguration(configPath);
logger.info("Loaded " + config.getPath());
if (f.exists()){
if (f.exists()) {
// Config was loaded from file
if ( !config.containsKey("twitter.auth")
|| !config.containsKey("twitter.max_requests")
@@ -171,16 +170,18 @@ public class Utils {
public static String stripURLParameter(String url, String parameter) {
int paramIndex = url.indexOf("?" + parameter);
boolean wasFirstParam = true;
if(paramIndex < 0) {
if (paramIndex < 0) {
wasFirstParam = false;
paramIndex = url.indexOf("&" + parameter);
}

if(paramIndex > 0) {
if (paramIndex > 0) {
int nextParam = url.indexOf("&", paramIndex+1);
if(nextParam != -1) {
if (nextParam != -1) {
String c = "&";
if(wasFirstParam) c = "?";
if (wasFirstParam) {
c = "?";
}
url = url.substring(0, paramIndex) + c + url.substring(nextParam+1, url.length());
} else {
url = url.substring(0, paramIndex);
@@ -250,10 +251,10 @@ public class Utils {
jarPath = URLDecoder.decode(jarPath, "UTF-8");
JarFile jarFile = new JarFile(jarPath);
Enumeration<JarEntry> entries = jarFile.entries();
while(entries.hasMoreElements()) {
while (entries.hasMoreElements()) {
JarEntry nextElement = entries.nextElement();
String entryName = nextElement.getName();
if(entryName.startsWith(relPath)
if (entryName.startsWith(relPath)
&& entryName.length() > (relPath.length() + "/".length())
&& !nextElement.isDirectory()) {
String className = entryName.replace('/', '.').replace('\\', '.').replace(".class", "");
@@ -401,7 +402,7 @@ public class Utils {
public static Map<String,String> parseUrlQuery(String query) {
Map<String,String> res = new HashMap<String, String>();

if (query.equals("")){
if (query.equals("")) {
return res;
}

@@ -410,9 +411,9 @@ public class Utils {

try {
for (String part : parts) {
if ((pos = part.indexOf('=')) >= 0){
if ((pos = part.indexOf('=')) >= 0) {
res.put(URLDecoder.decode(part.substring(0, pos), "UTF-8"), URLDecoder.decode(part.substring(pos + 1), "UTF-8"));
}else{
} else {
res.put(URLDecoder.decode(part, "UTF-8"), "");
}
}
@@ -434,7 +435,7 @@ public class Utils {
* @return The associated value or null if key wasn't found
*/
public static String parseUrlQuery(String query, String key) {
if (query.equals("")){
if (query.equals("")) {
return null;
}

@@ -444,7 +445,7 @@ public class Utils {
try {
for (String part : parts) {
if ((pos = part.indexOf('=')) >= 0) {
if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)){
if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)) {
return URLDecoder.decode(part.substring(pos + 1), "UTF-8");
}