Merge branch 'formatting': Various code cleanup and formatting.

Committed by MetaPrime on 2017-05-09 17:51:19 -07:00
commit dc2c100d9d
52 changed files with 745 additions and 788 deletions

View File

@@ -146,7 +146,7 @@ public class App {
try {
String url;
BufferedReader br = new BufferedReader(new FileReader(filename));
-while((url = br.readLine()) != null) {
+while ((url = br.readLine()) != null) {
// loop through each url in the file and process each url individually.
ripURL(url.trim(), cl.hasOption("n"));
}
@@ -171,7 +171,7 @@ public class App {
if (!history.contains(url.toExternalForm())) {
history.add(url.toExternalForm());
Utils.setConfigList("download.history", Arrays.asList(history.toArray()));
-if(saveConfig) {
+if (saveConfig) {
Utils.saveConfig();
}
}

View File

@@ -30,7 +30,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
}
public abstract List<String> getURLsFromPage(Document page);
public List<String> getDescriptionsFromPage(Document doc) throws IOException {
throw new IOException("getDescriptionsFromPage not implemented"); // Do I do this or make an abstract function?
throw new IOException("getDescriptionsFromPage not implemented"); // Do I do this or make an abstract function?
}
public abstract void downloadURL(URL url, int index);
public DownloadThreadPool getThreadPool() {
@@ -51,10 +51,10 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
return url;
}
public boolean hasDescriptionSupport() {
return false;
}
public String getDescription(String page) throws IOException {
throw new IOException("getDescription not implemented"); // Do I do this or make an abstract function?
throw new IOException("getDescription not implemented"); // Do I do this or make an abstract function?
}
public int descSleepTime() {
return 0;
@@ -90,23 +90,23 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
}
if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) {
logger.debug("Fetching description(s) from " + doc.location());
List<String> textURLs = getDescriptionsFromPage(doc);
if (textURLs.size() > 0) {
logger.debug("Found description link(s) from " + doc.location());
for (String textURL : textURLs) {
if (isStopped()) {
break;
}
textindex += 1;
logger.debug("Getting description from " + textURL);
sleep(descSleepTime());
String tempDesc = getDescription(textURL);
if (tempDesc != null) {
logger.debug("Got description: " + tempDesc);
saveText(new URL(textURL), "", tempDesc, textindex);
}
}
}
}
if (isStopped() || isThisATest()) {

View File

@@ -195,7 +195,7 @@ public abstract class AbstractRipper
public void retrievingSource(String url) {
RipStatusMessage msg = new RipStatusMessage(STATUS.LOADING_RESOURCE, url);
if (observer != null) {
observer.update(this, msg);
}
}

View File

@@ -158,13 +158,13 @@ public class ChanRipper extends AbstractHTMLRipper {
Boolean self_hosted = false;
if (!generalChanSite) {
for (String cdnDomain : chanSite.cdnDomains) {
-if (href.contains(cdnDomain)){
+if (href.contains(cdnDomain)) {
self_hosted = true;
}
}
}
-if (self_hosted || generalChanSite){
+if (self_hosted || generalChanSite) {
p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
m = p.matcher(href);
if (m.matches()) {
@@ -194,7 +194,7 @@ public class ChanRipper extends AbstractHTMLRipper {
}
List<URL> urls = RipUtils.getFilesFromURL(originalURL);
-for(URL imageurl : urls){
+for (URL imageurl : urls) {
imageURLs.add(imageurl.toString());
}
}

View File

@@ -48,7 +48,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
}
@Override
public boolean hasDescriptionSupport() {
return true;
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {

View File

@@ -1,4 +1,3 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -8,174 +7,165 @@ import com.rarchives.ripme.utils.Utils;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
/**
*
* @author
*/
public class E621Ripper extends AbstractHTMLRipper {
    public static final int POOL_IMAGES_PER_PAGE = 24;

    private DownloadThreadPool e621ThreadPool = new DownloadThreadPool("e621");

    public E621Ripper(URL url) throws IOException {
        super(url);
    }

    @Override
    public DownloadThreadPool getThreadPool() {
        return e621ThreadPool;
    }

    @Override
    public String getDomain() {
        return "e621.net";
    }

    @Override
    public String getHost() {
        return "e621";
    }

    @Override
    public Document getFirstPage() throws IOException {
        if (url.getPath().startsWith("/pool/show/")) {
            return Http.url("https://e621.net/pool/show/" + getTerm(url)).get();
        } else {
            return Http.url("https://e621.net/post/index/1/" + getTerm(url)).get();
        }
    }

    @Override
    public List<String> getURLsFromPage(Document page) {
        Elements elements = page.select("#post-list .thumb a,#pool-show .thumb a");
        List<String> res = new ArrayList<String>(elements.size());
        if (page.getElementById("pool-show") != null) {
            int index = 0;
            Element e = page.getElementById("paginator");
            if (e != null) {
                e = e.getElementsByClass("current").first();
                if (e != null) {
                    index = (Integer.parseInt(e.text()) - 1) * POOL_IMAGES_PER_PAGE;
                }
            }
            for (Element e_ : elements) {
                res.add(e_.absUrl("href") + "#" + ++index);
            }
        } else {
            for (Element e : elements) {
                res.add(e.absUrl("href") + "#" + e.child(0).attr("id").substring(1));
            }
        }
        return res;
    }

    @Override
    public Document getNextPage(Document page) throws IOException {
        for (Element e : page.select("#paginator a")) {
            if (e.attr("rel").equals("next")) {
                return Http.url(e.absUrl("href")).get();
            }
        }
        return null;
    }

    @Override
    public void downloadURL(final URL url, int index) {
        e621ThreadPool.addThread(new Thread(new Runnable() {
            public void run() {
                try {
                    Document page = Http.url(url).get();
                    Element e = page.getElementById("image");
                    if (e != null) {
                        addURLToDownload(new URL(e.absUrl("src")), Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
                    } else if ((e = page.select(".content object>param[name=\"movie\"]").first()) != null) {
                        addURLToDownload(new URL(e.absUrl("value")), Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
                    } else {
                        Logger.getLogger(E621Ripper.class.getName()).log(Level.WARNING, "Unsupported media type - please report to program author: " + url.toString());
                    }
                } catch (IOException ex) {
                    Logger.getLogger(E621Ripper.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }));
    }

    private String getTerm(URL url) throws MalformedURLException {
        String query = url.getQuery();
        if (query != null) {
            return Utils.parseUrlQuery(query, "tags");
        }
        if (query == null) {
            if ((query = url.getPath()).startsWith("/post/index/")) {
                query = query.substring(12);
                int pos = query.indexOf('/');
                if (pos == -1) {
                    return null;
                }
                // skip page number
                query = query.substring(pos + 1);
                if (query.endsWith("/")) {
                    query = query.substring(0, query.length() - 1);
                }
                try {
                    return URLDecoder.decode(query, "UTF-8");
                } catch (UnsupportedEncodingException e) {
                    // Shouldn't happen since UTF-8 is required to be supported
                    throw new RuntimeException(e);
                }
            } else if (query.startsWith("/pool/show/")) {
                query = query.substring(11);
                if (query.endsWith("/")) {
                    query = query.substring(0, query.length() - 1);
                }
                return query;
            }
        }
        return null;
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        String prefix = "";
        if (url.getPath().startsWith("/pool/show/")) {
            prefix = "pool_";
        } else {
            prefix = "term_";
        }
        return Utils.filesystemSafe(prefix + getTerm(url));
    }
}

View File

@@ -9,7 +9,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -18,10 +17,8 @@ import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.jsoup.Connection.Method;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
/**
@@ -45,7 +42,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
@Override
-public void downloadURL(URL url, int index){
+public void downloadURL(URL url, int index) {
addURLToDownload(url);
}
@@ -66,12 +63,12 @@ public class EroShareRipper extends AbstractHTMLRipper {
@Override
-public List<String> getURLsFromPage(Document doc){
+public List<String> getURLsFromPage(Document doc) {
List<String> URLs = new ArrayList<String>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
-for (Element img : imgs){
-if (img.hasClass("album-image")){
+for (Element img : imgs) {
+if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(imageURL);
@@ -79,8 +76,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
-for (Element vid : vids){
-if (vid.hasClass("album-video")){
+for (Element vid : vids) {
+if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(videoURL);
@@ -122,8 +119,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
List<URL> URLs = new ArrayList<URL>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
-for (Element img : imgs){
-if (img.hasClass("album-image")){
+for (Element img : imgs) {
+if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(new URL(imageURL));
@@ -131,8 +128,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
-for (Element vid : vids){
-if (vid.hasClass("album-video")){
+for (Element vid : vids) {
+if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(new URL(videoURL));

View File

@@ -59,8 +59,8 @@ public class FapprovedRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
-if ( (doc.select("div.pagination li.next.disabled").size() != 0)
-|| (doc.select("div.pagination").size() == 0) ) {
+if ((doc.select("div.pagination li.next.disabled").size() != 0)
+|| (doc.select("div.pagination").size() == 0)) {
throw new IOException("No more pages found");
}
sleep(1000);

View File

@@ -259,50 +259,50 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
List<String> imageURLs = new ArrayList<String>();
JSONArray photos = json.getJSONArray("photos");
for (int i = 0; i < photos.length(); i++) {
if (super.isStopped()) {
break;
}
JSONObject photo = photos.getJSONObject(i);
String imageURL = null;
String rawUrl = "https://500px.com" + photo.getString("url");
Document doc;
Elements images = new Elements();
try {
logger.debug("Loading " + rawUrl);
super.retrievingSource(rawUrl);
doc = Http.url(rawUrl).get();
images = doc.select("div#preload img");
logger.debug("Loading " + rawUrl);
super.retrievingSource(rawUrl);
doc = Http.url(rawUrl).get();
images = doc.select("div#preload img");
}
catch (IOException e) {
logger.error("Error fetching full-size image from " + rawUrl, e);
logger.error("Error fetching full-size image from " + rawUrl, e);
}
if (images.size() > 0) {
imageURL = images.first().attr("src");
logger.debug("Found full-size non-watermarked image: " + imageURL);
imageURL = images.first().attr("src");
logger.debug("Found full-size non-watermarked image: " + imageURL);
}
else {
logger.debug("Falling back to image_url from API response");
imageURL = photo.getString("image_url");
imageURL = imageURL.replaceAll("/4\\.", "/5.");
// See if there's larger images
for (String imageSize : new String[] { "2048" } ) {
String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + ".");
sleep(10);
if (urlExists(fsURL)) {
logger.info("Found larger image at " + fsURL);
imageURL = fsURL;
break;
}
}
logger.debug("Falling back to image_url from API response");
imageURL = photo.getString("image_url");
imageURL = imageURL.replaceAll("/4\\.", "/5.");
// See if there's larger images
for (String imageSize : new String[] { "2048" } ) {
String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + ".");
sleep(10);
if (urlExists(fsURL)) {
logger.info("Found larger image at " + fsURL);
imageURL = fsURL;
break;
}
}
}
if (imageURL == null) {
logger.error("Failed to find image for photo " + photo.toString());
logger.error("Failed to find image for photo " + photo.toString());
}
else {
imageURLs.add(imageURL);
if (isThisATest()) {
break;
}
}
}
return imageURLs;

View File

@@ -246,7 +246,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
if (donwloadLink.size() == 0) {
logger.warn("Could not download " + this.url);
return;
}
String link = "http:" + donwloadLink.first().attr("href");
logger.info("Found URL " + link);
String[] fileNameSplit = link.split("/");
@@ -266,7 +266,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
addURLToDownload(new URL(link),saveAS,"",cookies);
} catch (IOException e) {
logger.error("[!] Exception while loading/parsing " + this.url, e);
}
}
}

View File

@@ -11,11 +11,8 @@ import java.util.regex.Pattern;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
public class InstagramRipper extends AbstractJSONRipper {
@@ -65,7 +62,7 @@ public class InstagramRipper extends AbstractJSONRipper {
Pattern p = Pattern.compile("^https?://instagram\\.com/([^/]+)");
Matcher m = p.matcher(url.toExternalForm());
-if(m.matches()) {
+if (m.matches()) {
return m.group(1);
}
@@ -95,7 +92,7 @@ public class InstagramRipper extends AbstractJSONRipper {
throw new IOException("No additional pages found");
}
-if(nextPageAvailable) {
+if (nextPageAvailable) {
JSONArray items = json.getJSONArray("items");
JSONObject last_item = items.getJSONObject(items.length() - 1);
String nextMaxID = last_item.getString("id");

View File

@@ -2,24 +2,15 @@ package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.Utils;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class MyhentaicomicsRipper extends AbstractHTMLRipper {
public static boolean isTag;
@@ -95,13 +86,13 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
List<String> albumPagesList = new ArrayList<String>();
int pageNumber = 1;
albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
-while(true) {
+while (true) {
String urlToGet = "http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber);
Document nextAlbumPage;
try {
logger.info("Grabbing " + urlToGet);
nextAlbumPage = Http.url(urlToGet).get();
-} catch(IOException e){
+} catch(IOException e) {
logger.warn("Failed to log link in Jsoup");
nextAlbumPage = null;
e.printStackTrace();
@@ -109,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Element elem = nextAlbumPage.select("a.ui-icon-right").first();
String nextPage = elem.attr("href");
pageNumber = pageNumber + 1;
if(nextPage == ""){
if (nextPage == "") {
logger.info("Got " + pageNumber + " pages");
break;
}
@@ -145,7 +136,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
else {
album_doc = Http.url(element).get();
}
-} catch(IOException e){
+} catch(IOException e) {
logger.warn("Failed to log link in Jsoup");
album_doc = null;
e.printStackTrace();

View File

@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -13,7 +12,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
import com.rarchives.ripme.utils.Http;
public class NatalieMuRipper extends AbstractHTMLRipper {

View File

@@ -21,102 +21,105 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
/**
*
* @author
*/
-public class PahealRipper extends AbstractHTMLRipper{
-    private static Map<String,String> cookies=null;
-    private static Pattern gidPattern=null;
+public class PahealRipper extends AbstractHTMLRipper {
+    private static Map<String, String> cookies = null;
+    private static Pattern gidPattern = null;

    private static Map<String, String> getCookies() {
-        if(cookies==null){
-            cookies=new HashMap<String, String>(1);
-            cookies.put("ui-tnc-agreed","true");
+        if (cookies == null) {
+            cookies = new HashMap<String, String>(1);
+            cookies.put("ui-tnc-agreed", "true");
        }
        return cookies;
    }

    public PahealRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    public String getDomain() {
        return "rule34.paheal.net";
    }

    @Override
    public String getHost() {
        return "paheal";
    }

    @Override
    public Document getFirstPage() throws IOException {
-        return Http.url("http://rule34.paheal.net/post/list/"+getTerm(url)+"/1").cookies(getCookies()).get();
+        return Http.url("http://rule34.paheal.net/post/list/" + getTerm(url) + "/1").cookies(getCookies()).get();
    }

    @Override
    public Document getNextPage(Document page) throws IOException {
-        for(Element e:page.select("#paginator a")){
-            if(e.text().toLowerCase().equals("next"))
-                return Http.url(e.absUrl("href")).cookies(getCookies()).get();
-        }
+        for (Element e : page.select("#paginator a")) {
+            if (e.text().toLowerCase().equals("next")) {
+                return Http.url(e.absUrl("href")).cookies(getCookies()).get();
+            }
+        }
        return null;
    }

    @Override
    public List<String> getURLsFromPage(Document page) {
-        Elements elements=page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
-        List<String> res=new ArrayList<String>(elements.size());
-        for(Element e:elements)
-            res.add(e.absUrl("href"));
+        Elements elements = page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
+        List<String> res = new ArrayList<String>(elements.size());
+        for (Element e : elements) {
+            res.add(e.absUrl("href"));
+        }
        return res;
    }

    @Override
    public void downloadURL(URL url, int index) {
        try {
-            String name=url.getPath();
-            String ext=".png";
-            name=name.substring(name.lastIndexOf('/')+1);
-            if(name.indexOf('.')>=0){
-                ext=name.substring(name.lastIndexOf('.'));
-                name=name.substring(0,name.length()-ext.length());
-            }
-            addURLToDownload(url,new File(workingDir.getCanonicalPath()+File.separator+Utils.filesystemSafe(new URI(name).getPath())+ext));
+            String name = url.getPath();
+            String ext = ".png";
+            name = name.substring(name.lastIndexOf('/') + 1);
+            if (name.indexOf('.') >= 0) {
+                ext = name.substring(name.lastIndexOf('.'));
+                name = name.substring(0, name.length() - ext.length());
+            }
+            File outFile = new File(workingDir.getCanonicalPath()
+                    + File.separator
+                    + Utils.filesystemSafe(new URI(name).getPath())
+                    + ext);
+            addURLToDownload(url, outFile);
        } catch (IOException ex) {
            Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
        } catch (URISyntaxException ex) {
            Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

-    private String getTerm(URL url) throws MalformedURLException{
-        if(gidPattern==null)
-            gidPattern=Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
+    private String getTerm(URL url) throws MalformedURLException {
+        if (gidPattern == null) {
+            gidPattern = Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
+        }
        Matcher m = gidPattern.matcher(url.toExternalForm());
-        if(m.matches())
-            return m.group(2);
+        if (m.matches()) {
+            return m.group(2);
+        }
-        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        try {
            return Utils.filesystemSafe(new URI(getTerm(url)).getPath());
        } catch (URISyntaxException ex) {
            Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
        }
-        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
    }
}

View File

@@ -132,7 +132,7 @@ public class RedditRipper extends AlbumRipper {
JSONArray jsonArray = new JSONArray();
if (jsonObj instanceof JSONObject) {
jsonArray.put( (JSONObject) jsonObj);
-} else if (jsonObj instanceof JSONArray){
+} else if (jsonObj instanceof JSONArray) {
jsonArray = (JSONArray) jsonObj;
} else {
logger.warn("[!] Unable to parse JSON: " + jsonString);

View File

@@ -194,9 +194,9 @@ public class TumblrRipper extends AlbumRipper {
try {
fileURL = new URL(photo.getJSONObject("original_size").getString("url"));
m = p.matcher(fileURL.toString());
-if(m.matches()) {
+if (m.matches()) {
addURLToDownload(fileURL);
-} else{
+} else {
URL redirectedURL = Http.url(fileURL).ignoreContentType().response().url();
addURLToDownload(redirectedURL);
}

View File

@@ -11,155 +11,151 @@ import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class WordpressComicRipper extends AbstractHTMLRipper {
    public WordpressComicRipper(URL url) throws IOException {
        super(url);
    }

    public static List<String> explicit_domains = Arrays.asList("www.totempole666.com",
        "buttsmithy.com", "themonsterunderthebed.net", "prismblush.com");

    @Override
    public String getHost() {
        String host = url.toExternalForm().split("/")[2];
        return host;
    }

    @Override
    public String getDomain() {
        String host = url.toExternalForm().split("/")[2];
        return host;
    }

    @Override
    public boolean canRip(URL url) {
        String url_name = url.toExternalForm();
        if (explicit_domains.contains(url_name.split("/")[2]) == true) {
            Pattern totempole666Pat = Pattern.compile("https?://www\\.totempole666.com/comic/([a-zA-Z0-9_-]*)/?$");
            Matcher totempole666Mat = totempole666Pat.matcher(url.toExternalForm());
            if (totempole666Mat.matches()) {
                return true;
            }
            Pattern buttsmithyPat = Pattern.compile("https?://buttsmithy.com/archives/comic/([a-zA-Z0-9_-]*)/?$");
            Matcher buttsmithyMat = buttsmithyPat.matcher(url.toExternalForm());
            if (buttsmithyMat.matches()) {
                return true;
            }
            Pattern theMonsterUnderTheBedPat = Pattern.compile("https?://themonsterunderthebed.net/\\?comic=([a-zA-Z0-9_-]*)/?$");
            Matcher theMonsterUnderTheBedMat = theMonsterUnderTheBedPat.matcher(url.toExternalForm());
            if (theMonsterUnderTheBedMat.matches()) {
                return true;
            }
            Pattern prismblushPat = Pattern.compile("https?://prismblush.com/comic/([a-zA-Z0-9_-]*)/?$");
            Matcher prismblushMat = prismblushPat.matcher(url.toExternalForm());
            if (prismblushMat.matches()) {
                return true;
            }
        }
        return false;
    }

    @Override
    public String getAlbumTitle(URL url) throws MalformedURLException {
        Pattern totempole666Pat = Pattern.compile("(?:https?://)?(?:www\\.)?totempole666.com\\/comic/([a-zA-Z0-9_-]*)/?$");
        Matcher totempole666Mat = totempole666Pat.matcher(url.toExternalForm());
        if (totempole666Mat.matches()) {
            return "totempole666.com" + "_" + "The_cummoner";
        }
        Pattern buttsmithyPat = Pattern.compile("https?://buttsmithy.com/archives/comic/([a-zA-Z0-9_-]*)/?$");
        Matcher buttsmithyMat = buttsmithyPat.matcher(url.toExternalForm());
        if (buttsmithyMat.matches()) {
            return "buttsmithy.com" + "_" + "Alfie";
        }
        Pattern theMonsterUnderTheBedPat = Pattern.compile("https?://themonsterunderthebed.net/?comic=([a-zA-Z0-9_-]*)/?$");
        Matcher theMonsterUnderTheBedMat = theMonsterUnderTheBedPat.matcher(url.toExternalForm());
        if (theMonsterUnderTheBedMat.matches()) {
            return "themonsterunderthebed.net_TheMonsterUnderTheBed";
        }
        Pattern prismblushPat = Pattern.compile("https?://prismblush.com/comic/([a-zA-Z0-9_-]*)/?$");
        Matcher prismblushMat = prismblushPat.matcher(url.toExternalForm());
        if (prismblushMat.matches()) {
            return "prismblush.com_" + prismblushMat.group(1).replaceAll("-pg-\\d+", "");
        }
        return super.getAlbumTitle(url);
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        String url_name = url.toExternalForm();
        // We shouldn't need to return any GID
        if (explicit_domains.contains(url_name.split("/")[2]) == true) {
            return "";
        }
        throw new MalformedURLException("You should never see this error message");
    }

    @Override
    public Document getFirstPage() throws IOException {
        // "url" is an instance field of the superclass
        return Http.url(url).get();
    }

    @Override
    public Document getNextPage(Document doc) throws IOException {
        // Find next page
        String nextPage = "";
        Element elem = null;
        if (explicit_domains.contains("www.totempole666.com") == true
            || explicit_domains.contains("buttsmithy.com") == true
            || explicit_domains.contains("themonsterunderthebed.net")
            || explicit_domains.contains("prismblush.com")) {
            elem = doc.select("a.comic-nav-next").first();
            if (elem == null) {
                throw new IOException("No more pages");
            }
            nextPage = elem.attr("href");
        }
        if (nextPage == "") {
            throw new IOException("No more pages");
        } else {
            return Http.url(nextPage).get();
        }
    }

    @Override
    public List<String> getURLsFromPage(Document doc) {
        List<String> result = new ArrayList<String>();
        if (explicit_domains.contains("www.totempole666.com") == true
            || explicit_domains.contains("buttsmithy.com") == true
            || explicit_domains.contains("themonsterunderthebed.net")
            || explicit_domains.contains("prismblush.com")) {
            Element elem = doc.select("div.comic-table > div#comic > a > img").first();
            // If doc is the last page in the comic then elem.attr("src") returns null
            // because there is no link <a> to the next page
            if (elem == null) {
                logger.debug("Got last page in totempole666 comic");
                elem = doc.select("div.comic-table > div#comic > img").first();
            }
            result.add(elem.attr("src"));
        }
        return result;
    }

    @Override
    public void downloadURL(URL url, int index) {
        addURLToDownload(url, getPrefix(index));
    }
}

View File

@@ -1,4 +1,3 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -18,80 +17,79 @@ import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
/**
*
* @author
*/
-public class XbooruRipper extends AbstractHTMLRipper{
-    private static Pattern gidPattern=null;
+public class XbooruRipper extends AbstractHTMLRipper {
+    private static Pattern gidPattern = null;

    public XbooruRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    public String getDomain() {
        return "xbooru.com";
    }

    @Override
    public String getHost() {
        return "xbooru";
    }

-    private String getPage(int num) throws MalformedURLException{
-        return "http://xbooru.com/index.php?page=dapi&s=post&q=index&pid="+num+"&tags="+getTerm(url);
-    }
+    private String getPage(int num) throws MalformedURLException {
+        return "http://xbooru.com/index.php?page=dapi&s=post&q=index&pid=" + num + "&tags=" + getTerm(url);
+    }

    @Override
    public Document getFirstPage() throws IOException {
        return Http.url(getPage(0)).get();
    }

    @Override
    public Document getNextPage(Document doc) throws IOException {
-        int offset=Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
-        int num=Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));
-        if(offset+100>num)
-            return null;
-        return Http.url(getPage(offset/100+1)).get();
-    }
+        int offset = Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
+        int num = Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));
+        if (offset + 100 > num) {
+            return null;
+        }
+        return Http.url(getPage(offset / 100 + 1)).get();
+    }

    @Override
    public List<String> getURLsFromPage(Document page) {
-        List<String> res=new ArrayList<String>(100);
-        for(Element e:page.getElementsByTag("post"))
-            res.add(e.absUrl("file_url")+"#"+e.attr("id"));
-        return res;
-    }
+        List<String> res = new ArrayList<String>(100);
+        for (Element e : page.getElementsByTag("post")) {
+            res.add(e.absUrl("file_url") + "#" + e.attr("id"));
+        }
+        return res;
+    }

    @Override
    public void downloadURL(URL url, int index) {
-        addURLToDownload(url,Utils.getConfigBoolean("download.save_order",true)?url.getRef()+"-":"");
+        addURLToDownload(url, Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
    }

-    private String getTerm(URL url) throws MalformedURLException{
-        if(gidPattern==null)
-            gidPattern=Pattern.compile("^https?://(www\\.)?xbooru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(\\&|(#.*)?$)");
+    private String getTerm(URL url) throws MalformedURLException {
+        if (gidPattern == null) {
+            gidPattern = Pattern.compile("^https?://(www\\.)?xbooru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(\\&|(#.*)?$)");
+        }
        Matcher m = gidPattern.matcher(url.toExternalForm());
-        if(m.matches())
-            return m.group(4);
+        if (m.matches()) {
+            return m.group(4);
+        }
-        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        try {
            return Utils.filesystemSafe(new URI(getTerm(url)).getPath());
        } catch (URISyntaxException ex) {
            Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
        }
-        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got "+url+" instead");
+        throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
    }
}

View File

@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -14,10 +13,8 @@ import java.util.regex.Pattern;
import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
public class ZizkiRipper extends AbstractHTMLRipper {

View File

@@ -3,22 +3,26 @@ package com.rarchives.ripme.ripper.rippers.ripperhelpers;
import java.util.List;
public class ChanSite {
-//The domains where the threads are hosted.
+// The domains where the threads are hosted.
public List<String> domains;
-//The domains where the images are hosted.
+// The domains where the images are hosted.
public List<String> cdnDomains;
-public ChanSite(List<String> Domains, List<String> CdnDomains){
-if(Domains.isEmpty())
+public ChanSite(List<String> Domains, List<String> CdnDomains) {
+if (Domains.isEmpty()) {
throw new IllegalArgumentException("Domains");
-if(CdnDomains.isEmpty())
+}
+if (CdnDomains.isEmpty()) {
throw new IllegalArgumentException("CdnDomains");
+}
domains = Domains;
cdnDomains = CdnDomains;
}
-public ChanSite(List<String> Domains){
-if(Domains.isEmpty())
+public ChanSite(List<String> Domains) {
+if (Domains.isEmpty()) {
throw new IllegalArgumentException("Domains");
+}
domains = Domains;
cdnDomains = Domains;
}

View File

@@ -69,9 +69,9 @@ public class PornhubRipper extends VideoRipper {
vidUrl = null;
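// Qualities are ordered best-first; the loop keeps the first player_<quality> variable that matches.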
for (String quality : new String[] {"quality_1080p", "quality_720p", "quality_480p", "quality_240p"}) {
Pattern pv = Pattern.compile("^.*var player_" + quality + " = '([^']*)'.*$", Pattern.DOTALL);
Matcher mv = pv.matcher(html);
if (mv.matches()) {
vidUrl = mv.group(1);
break;
}

View File

@@ -27,12 +27,12 @@ public class QueueMenuMouseListener extends MouseAdapter {
removeSelected = new AbstractAction("Remove Selected") {
@Override
public void actionPerformed(ActionEvent ae) {
Object o = queueList.getSelectedValue();
while (o != null) {
queueListModel.removeElement(o);
o = queueList.getSelectedValue();
}
updateUI();
}
};
popup.add(removeSelected);
@@ -40,8 +40,8 @@ public class QueueMenuMouseListener extends MouseAdapter {
clearQueue = new AbstractAction("Remove All") {
@Override
public void actionPerformed(ActionEvent ae) {
queueListModel.removeAllElements();
updateUI();
}
};
popup.add(clearQueue);

View File

@@ -3,16 +3,15 @@ package com.rarchives.ripme.utils;
/**
* Base64 encoder/decoder
* From http://stackoverflow.com/a/4265472
*
*/
public class Base64 {
private final static char[] ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".toCharArray();
private static int[] toInt = new int[128];
static {
-for(int i=0; i< ALPHABET.length; i++){
-toInt[ALPHABET[i]]= i;
+for (int i = 0; i < ALPHABET.length; i++) {
+toInt[ALPHABET[i]] = i;
}
}
@@ -22,12 +21,12 @@ public class Base64 {
* @param buf the byte array (not null)
* @return the translated Base64 string (not null)
*/
-public static String encode(byte[] buf){
+public static String encode(byte[] buf) {
int size = buf.length;
char[] ar = new char[((size + 2) / 3) * 4];
int a = 0;
-int i=0;
-while(i < size){
+int i = 0;
+while (i < size) {
byte b0 = buf[i++];
byte b1 = (i < size) ? buf[i++] : 0;
byte b2 = (i < size) ? buf[i++] : 0;
@@ -38,9 +37,9 @@ public class Base64 {
ar[a++] = ALPHABET[((b1 << 2) | ((b2 & 0xFF) >> 6)) & mask];
ar[a++] = ALPHABET[b2 & mask];
}
-switch(size % 3){
+switch (size % 3) {
case 1: ar[--a] = '=';
case 2: ar[--a] = '=';
}
return new String(ar);
}
@@ -51,25 +50,25 @@ public class Base64 {
* @param s the Base64 string (not null)
* @return the byte array (not null)
*/
-public static byte[] decode(String s){
-int delta = s.endsWith( "==" ) ? 2 : s.endsWith( "=" ) ? 1 : 0;
-byte[] buffer = new byte[s.length()*3/4 - delta];
+public static byte[] decode(String s) {
+int delta = s.endsWith("==") ? 2 : s.endsWith("=") ? 1 : 0;
+byte[] buffer = new byte[s.length() * 3 / 4 - delta];
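// delta counts the '=' padding characters; each one removes a byte from the decoded length.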
int mask = 0xFF;
int index = 0;
-for(int i=0; i< s.length(); i+=4){
-int c0 = toInt[s.charAt( i )];
-int c1 = toInt[s.charAt( i + 1)];
-buffer[index++]= (byte)(((c0 << 2) | (c1 >> 4)) & mask);
-if(index >= buffer.length){
+for (int i = 0; i < s.length(); i += 4) {
+int c0 = toInt[s.charAt(i)];
+int c1 = toInt[s.charAt(i + 1)];
+buffer[index++] = (byte) (((c0 << 2) | (c1 >> 4)) & mask);
+if (index >= buffer.length) {
return buffer;
}
-int c2 = toInt[s.charAt( i + 2)];
-buffer[index++]= (byte)(((c1 << 4) | (c2 >> 2)) & mask);
-if(index >= buffer.length){
+int c2 = toInt[s.charAt(i + 2)];
+buffer[index++] = (byte) (((c1 << 4) | (c2 >> 2)) & mask);
+if (index >= buffer.length) {
return buffer;
}
-int c3 = toInt[s.charAt( i + 3 )];
-buffer[index++]= (byte)(((c2 << 6) | c3) & mask);
+int c3 = toInt[s.charAt(i + 3)];
+buffer[index++] = (byte) (((c2 << 6) | c3) & mask);
}
return buffer;
}

View File

@@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.rippers.VidbleRipper;
import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
import com.rarchives.ripme.ripper.rippers.EroShareRipper;
public class RipUtils {
private static final Logger logger = Logger.getLogger(RipUtils.class);
@@ -47,7 +46,7 @@ public class RipUtils {
return result;
}
else if (url.getHost().endsWith("imgur.com") && url.toExternalForm().contains(",")) {
// Imgur image series.
try {
logger.debug("Fetching imgur series at " + url);
ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url);
@@ -119,7 +118,7 @@ public class RipUtils {
}
if (url.getHost().equals("imgur.com") ||
url.getHost().equals("m.imgur.com")){
url.getHost().equals("m.imgur.com")) {
try {
// Fetch the page
Document doc = Jsoup.connect(url.toExternalForm())
@@ -165,18 +164,6 @@ public class RipUtils {
if (url == null) url = urlFromSiteDirectoryName(dir, "vinebox", "http://finebox.co/u/", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "imgbox", "http://imgbox.com/g/", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "modelmayhem", "http://www.modelmayhem.com/", "");
-/*
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
-*/
//if (url == null) url = urlFromSiteDirectoryName(dir, "8muses", "http://www.8muses.com/index/category/", "");
return url;
}
@@ -248,9 +235,9 @@ public class RipUtils {
List<String> fields = Arrays.asList(dir.split("_"));
String album = fields.get(1);
String url = "http://";
if ( (fields.contains("top") || fields.contains("new"))
&& (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))
) {
if ((fields.contains("top") || fields.contains("new"))
&& (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))) {
// Subreddit
fields.remove(0); // "imgur"
String sub = "";

View File

@@ -34,7 +34,6 @@ import com.rarchives.ripme.ripper.AbstractRipper;
* Common utility functions used in various places throughout the project.
*/
public class Utils {
public static final String RIP_DIRECTORY = "rips";
private static final String configFile = "rip.properties";
private static final Logger logger = Logger.getLogger(Utils.class);
@@ -50,7 +49,7 @@ public class Utils {
}
config = new PropertiesConfiguration(configPath);
logger.info("Loaded " + config.getPath());
-if (f.exists()){
+if (f.exists()) {
// Config was loaded from file
if ( !config.containsKey("twitter.auth")
|| !config.containsKey("twitter.max_requests")
@@ -171,16 +170,18 @@ public class Utils {
public static String stripURLParameter(String url, String parameter) {
int paramIndex = url.indexOf("?" + parameter);
boolean wasFirstParam = true;
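// If the stripped parameter was first in the query, the next parameter must take over its '?' separator.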
-if(paramIndex < 0) {
+if (paramIndex < 0) {
wasFirstParam = false;
paramIndex = url.indexOf("&" + parameter);
}
-if(paramIndex > 0) {
+if (paramIndex > 0) {
int nextParam = url.indexOf("&", paramIndex+1);
-if(nextParam != -1) {
+if (nextParam != -1) {
String c = "&";
if(wasFirstParam) c = "?";
if (wasFirstParam) {
c = "?";
}
url = url.substring(0, paramIndex) + c + url.substring(nextParam+1, url.length());
} else {
url = url.substring(0, paramIndex);
@@ -250,10 +251,10 @@ public class Utils {
jarPath = URLDecoder.decode(jarPath, "UTF-8");
JarFile jarFile = new JarFile(jarPath);
Enumeration<JarEntry> entries = jarFile.entries();
-while(entries.hasMoreElements()) {
+while (entries.hasMoreElements()) {
JarEntry nextElement = entries.nextElement();
String entryName = nextElement.getName();
-if(entryName.startsWith(relPath)
+if (entryName.startsWith(relPath)
&& entryName.length() > (relPath.length() + "/".length())
&& !nextElement.isDirectory()) {
String className = entryName.replace('/', '.').replace('\\', '.').replace(".class", "");
@@ -401,7 +402,7 @@ public class Utils {
public static Map<String,String> parseUrlQuery(String query) {
Map<String,String> res = new HashMap<String, String>();
if (query.equals("")){
if (query.equals("")) {
return res;
}
@@ -410,9 +411,9 @@ public class Utils {
try {
for (String part : parts) {
-if ((pos = part.indexOf('=')) >= 0){
+if ((pos = part.indexOf('=')) >= 0) {
res.put(URLDecoder.decode(part.substring(0, pos), "UTF-8"), URLDecoder.decode(part.substring(pos + 1), "UTF-8"));
-}else{
+} else {
res.put(URLDecoder.decode(part, "UTF-8"), "");
}
}
@@ -434,7 +435,7 @@ public class Utils {
* @return The associated value or null if key wasn't found
*/
public static String parseUrlQuery(String query, String key) {
if (query.equals("")){
if (query.equals("")) {
return null;
}
@@ -444,7 +445,7 @@ public class Utils {
try {
for (String part : parts) {
if ((pos = part.indexOf('=')) >= 0) {
-if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)){
+if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)) {
return URLDecoder.decode(part.substring(pos + 1), "UTF-8");
}