Fix style

MetaPrime 2017-06-19 10:32:57 -07:00
parent 153b206260
commit 71694196b9
23 changed files with 166 additions and 166 deletions

@@ -40,7 +40,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
     public boolean canRip(URL url) {
         return url.getHost().endsWith(getDomain());
     }
 
     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return url;

@@ -92,7 +92,7 @@ public abstract class AlbumRipper extends AbstractRipper {
      * Uses filename from URL to decide filename.
      * @param url
      *      URL to download
      * @return
      *      True on success
      */
     public boolean addURLToDownload(URL url) {
@@ -139,7 +139,7 @@ public abstract class AlbumRipper extends AbstractRipper {
             itemsPending.remove(url);
             itemsCompleted.put(url, file);
             observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file.getAbsolutePath()));
             checkIfComplete();
         }
@@ -160,8 +160,8 @@ public abstract class AlbumRipper extends AbstractRipper {
      * Sets directory to save all ripped files to.
      * @param url
      *      URL to define how the working directory should be saved.
      * @throws
      *      IOException
      */
     @Override
     public void setWorkingDir(URL url) throws IOException {
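Aside: the Javadoc above describes the queueing contract. A minimal sketch of how a concrete ripper might call it, assuming a subclass context (the warning message is illustrative, not from this commit):

    // Hypothetical usage inside an AlbumRipper subclass.
    // addURLToDownload(URL) picks the filename from the URL's last path
    // segment and returns true when the download was queued successfully.
    @Override
    public void downloadURL(URL url, int index) {
        if (!addURLToDownload(url)) {
            logger.warn("Could not queue " + url);  // assumed handling
        }
    }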

@@ -135,7 +135,7 @@ public class DownloadVideoThread extends Thread {
         observer.downloadCompleted(url, saveAs);
         logger.info("[+] Saved " + url + " as " + this.prettySaveAs);
     }
 
     private int getTotalBytes(URL url) throws IOException {
         HttpURLConnection conn = (HttpURLConnection) url.openConnection();
         conn.setRequestMethod("HEAD");
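The hunk ends mid-method; a plausible completion of this HEAD-request helper, assuming it reads Content-Length from the response (the tail below is reconstructed, not part of the diff):

    // Sketch: issue a HEAD request so only headers are transferred,
    // then report the advertised body size (-1 if the server omits it).
    private int getTotalBytes(URL url) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("HEAD");
        conn.connect();
        int totalBytes = conn.getContentLength();
        conn.disconnect();
        return totalBytes;
    }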

@@ -109,20 +109,20 @@ public class CheebyRipper extends AbstractHTMLRipper {
         }
         return imageURLs;
     }
 
     @Override
     public void rip() throws IOException {
         logger.info("Retrieving " + this.url);
         sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
         Document doc = getFirstPage();
         while (doc != null) {
             List<Image> images = getImagesFromPage(doc);
             if (images.size() == 0) {
                 throw new IOException("No images found at " + doc.location());
             }
             for (Image image : images) {
                 if (isStopped()) {
                     break;
@@ -167,7 +167,7 @@ public class CheebyRipper extends AbstractHTMLRipper {
             }
         }
     }
 
     private class Image {
         String url, prefix;
         int index;

@@ -46,7 +46,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
     }
 
     @Override
-    public void downloadURL(URL url, int index){
+    public void downloadURL(URL url, int index) {
         addURLToDownload(url);
     }
 
     @Override
@@ -107,12 +107,12 @@ public class EroShareRipper extends AbstractHTMLRipper {
     @Override
-    public List<String> getURLsFromPage(Document doc){
+    public List<String> getURLsFromPage(Document doc) {
         List<String> URLs = new ArrayList<String>();
         //Pictures
         Elements imgs = doc.getElementsByTag("img");
-        for (Element img : imgs){
-            if (img.hasClass("album-image")){
+        for (Element img : imgs) {
+            if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
                 imageURL = "https:" + imageURL;
                 URLs.add(imageURL);
@@ -120,8 +120,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         }
         //Videos
         Elements vids = doc.getElementsByTag("video");
-        for (Element vid : vids){
-            if (vid.hasClass("album-video")){
+        for (Element vid : vids) {
+            if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
                 URLs.add(videoURL);
@@ -129,18 +129,18 @@ public class EroShareRipper extends AbstractHTMLRipper {
         }
         // Profile videos
         Elements links = doc.select("div.item-container > a.item");
-        for (Element link : links){
+        for (Element link : links) {
             Document video_page;
             try {
                 video_page = Http.url("https://eroshare.com" + link.attr("href")).get();
-            } catch(IOException e) {
+            } catch (IOException e) {
                 logger.warn("Failed to log link in Jsoup");
                 video_page = null;
                 e.printStackTrace();
             }
             Elements profile_vids = video_page.getElementsByTag("video");
-            for (Element vid : profile_vids){
-                if (vid.hasClass("album-video")){
+            for (Element vid : profile_vids) {
+                if (vid.hasClass("album-video")) {
                     Elements source = vid.getElementsByTag("source");
                     String videoURL = source.first().attr("src");
                     URLs.add(videoURL);
@@ -190,8 +190,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         List<URL> URLs = new ArrayList<URL>();
         //Pictures
         Elements imgs = doc.getElementsByTag("img");
-        for (Element img : imgs){
-            if (img.hasClass("album-image")){
+        for (Element img : imgs) {
+            if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
                 imageURL = "https:" + imageURL;
                 URLs.add(new URL(imageURL));
@@ -199,8 +199,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         }
         //Videos
         Elements vids = doc.getElementsByTag("video");
-        for (Element vid : vids){
-            if (vid.hasClass("album-video")){
+        for (Element vid : vids) {
+            if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
                 URLs.add(new URL(videoURL));

@@ -1,101 +1,101 @@
 package com.rarchives.ripme.ripper.rippers;
 
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
 
 public class FapprovedRipper extends AbstractHTMLRipper {
 
     private int pageIndex = 1;
     private String username = null;
 
     public FapprovedRipper(URL url) throws IOException {
         super(url);
     }
 
     @Override
     public String getHost() {
         return "fapproved";
     }
     @Override
     public String getDomain() {
         return "fapproved.com";
     }
 
     @Override
     public String getGID(URL url) throws MalformedURLException {
         Pattern p = Pattern.compile("^https?://[w.]*fapproved.com/users/([a-zA-Z0-9\\-_]{3,}).*$");
         Matcher m = p.matcher(url.toExternalForm());
         if (m.matches()) {
             username = m.group(1);
             return username;
         }
         throw new MalformedURLException("Fapproved user not found in " + url + ", expected http://fapproved.com/users/username/images");
     }
 
     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return new URL("http://fapproved.com/users/" + getGID(url));
     }
 
     @Override
     public Document getFirstPage() throws IOException {
         pageIndex = 1;
         String pageURL = getPageURL(pageIndex);
         return Http.url(pageURL)
                    .ignoreContentType()
                    .get();
     }
 
     @Override
     public Document getNextPage(Document doc) throws IOException {
         if ((doc.select("div.pagination li.next.disabled").size() != 0)
                 || (doc.select("div.pagination").size() == 0)) {
             throw new IOException("No more pages found");
         }
         sleep(1000);
         pageIndex++;
         String pageURL = getPageURL(pageIndex);
         return Http.url(pageURL)
                    .ignoreContentType()
                    .get();
     }
 
     private String getPageURL(int index) throws IOException {
         if (username == null) {
             username = getGID(this.url);
         }
         return "http://fapproved.com/users/" + username + "/images?page=" + pageIndex;
     }
 
     @Override
     public List<String> getURLsFromPage(Document page) {
         List<String> imageURLs = new ArrayList<String>();
         for (Element image : page.select("div.actual-image img")) {
             String imageURL = image.attr("src");
             if (imageURL.startsWith("//")) {
                 imageURL = "http:" + imageURL;
             }
             else if (imageURL.startsWith("/")) {
                 imageURL = "http://fapproved.com" + imageURL;
             }
             imageURLs.add(imageURL);
         }
         return imageURLs;
     }
 
     @Override
     public void downloadURL(URL url, int index) {
         addURLToDownload(url, getPrefix(index));
     }
 }
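As a quick sanity check of the GID pattern above, here is how the matcher behaves on a typical profile URL (the username is made up):

    // Hypothetical check of FapprovedRipper's user pattern:
    Pattern p = Pattern.compile("^https?://[w.]*fapproved.com/users/([a-zA-Z0-9\\-_]{3,}).*$");
    Matcher m = p.matcher("http://www.fapproved.com/users/someuser/images");
    if (m.matches()) {
        System.out.println(m.group(1));  // prints "someuser"
    }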

@@ -63,7 +63,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
         }
         return new URL(sUrl);
     }
 
     public String getAlbumTitle(URL url) throws MalformedURLException {
         if (!url.toExternalForm().contains("/sets/")) {
             return super.getAlbumTitle(url);
@@ -90,7 +90,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
         // Root: https://www.flickr.com/photos/115858035@N04/
         // Album: https://www.flickr.com/photos/115858035@N04/sets/72157644042355643/
         final String domainRegex = "https?://[wm.]*flickr.com";
         final String userRegex = "[a-zA-Z0-9@]+";
         // Album
@@ -159,7 +159,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
         }
         return Http.url(nextURL).get();
     }
 
     @Override
     public List<String> getURLsFromPage(Document page) {
         List<String> imageURLs = new ArrayList<String>();
@@ -194,7 +194,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
         }
         return imageURLs;
     }
 
     @Override
     public void downloadURL(URL url, int index) {
         // Add image page to threadpool to grab the image & download it
@@ -267,7 +267,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
             logger.error("[!] Exception while loading/parsing " + this.url, e);
         }
     }
 
     private Document getLargestImagePageDocument(URL url) throws IOException {
         // Get current page
         Document doc = Http.url(url).get();

@@ -51,7 +51,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return new URL("http://gifyo.com/" + getGID(url) + "/");
     }
 
     @Override
     public Document getFirstPage() throws IOException {
         Response resp = Http.url(this.url)
@@ -66,7 +66,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
         }
         return doc;
     }
 
     @Override
     public Document getNextPage(Document doc) throws IOException {
         page++;
@@ -89,7 +89,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
         sleep(2000);
         return nextDoc;
     }
 
     @Override
     public List<String> getURLsFromPage(Document doc) {
         List<String> imageURLs = new ArrayList<String>();
@@ -105,7 +105,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
         logger.debug("Found " + imageURLs.size() + " images");
         return imageURLs;
     }
 
     @Override
     public void downloadURL(URL url, int index) {
         addURLToDownload(url);

@@ -43,7 +43,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
                 + "imagestash.org/tag/tagname"
                 + " Got: " + url);
     }
 
     @Override
     public JSONObject getFirstPage() throws IOException {
         String baseURL = "https://imagestash.org/images?tags="
@@ -51,7 +51,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
                 + "&page=" + page;
         return Http.url(baseURL).getJSON();
     }
 
     @Override
     public JSONObject getNextPage(JSONObject json) throws IOException {
         int count = json.getInt("count"),
@@ -64,7 +64,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
         page++;
         return getFirstPage();
     }
 
     @Override
     public List<String> getURLsFromJSON(JSONObject json) {
         List<String> imageURLs = new ArrayList<String>();
@@ -79,7 +79,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
         }
         return imageURLs;
     }
 
     @Override
     public void downloadURL(URL url, int index) {
         addURLToDownload(url, getPrefix(index));

@@ -121,7 +121,7 @@ public class InstagramRipper extends AbstractJSONRipper {
         imageURL = imageURL.replaceAll("\\?ig_cache_key.+$", "");
         return imageURL;
     }
 
     private String getMedia(JSONObject data) {
         String imageURL = "";
         if (data.has("videos")) {
@@ -131,14 +131,14 @@ public class InstagramRipper extends AbstractJSONRipper {
         }
         return imageURL;
     }
 
     @Override
     public List<String> getURLsFromJSON(JSONObject json) {
         List<String> imageURLs = new ArrayList<String>();
         JSONArray datas = json.getJSONArray("items");
         for (int i = 0; i < datas.length(); i++) {
             JSONObject data = (JSONObject) datas.get(i);
             String dataType = data.getString("type");
             if (dataType.equals("carousel")) {
                 JSONArray carouselMedias = data.getJSONArray("carousel_media");

@@ -55,7 +55,7 @@ public class ModelmayhemRipper extends AlbumRipper {
                 .method(Method.GET)
                 .execute();
         cookies = resp.cookies();
 
         resp = Jsoup.connect("http://www.modelmayhem.com/includes/js/auth.php")
                 .cookies(cookies)
                 .ignoreContentType(true)

@@ -94,7 +94,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
         try {
             logger.info("Grabbing " + urlToGet);
             nextAlbumPage = Http.url(urlToGet).get();
-        } catch(IOException e) {
+        } catch (IOException e) {
             logger.warn("Failed to log link in Jsoup");
             nextAlbumPage = null;
             e.printStackTrace();
@@ -122,7 +122,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
         Document doc;
         try {
             doc = Http.url("http://myhentaicomics.com" + url).get();
-        } catch(IOException e){
+        } catch (IOException e) {
             logger.warn("Failed to log link in Jsoup");
             doc = null;
             e.printStackTrace();
@@ -144,7 +144,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
             else {
                 album_doc = Http.url(element).get();
             }
-        } catch(IOException e){
+        } catch (IOException e) {
             logger.warn("Failed to log link in Jsoup");
             album_doc = null;
             e.printStackTrace();
@@ -168,7 +168,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
                 result.add("http://myhentaicomics.com/" + imageSource);
                 addURLToDownload(new URL("http://myhentaicomics.com/" + imageSource), "", url_string.split("/")[6]);
             }
-            catch(MalformedURLException e) {
+            catch (MalformedURLException e) {
                 logger.warn("Malformed URL");
                 e.printStackTrace();
             }

@@ -41,7 +41,7 @@ public class NfsfwRipper extends AlbumRipper {
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return url;
     }
 
     @Override
     public String getAlbumTitle(URL url) throws MalformedURLException {
         try {
@@ -185,7 +185,7 @@ public class NfsfwRipper extends AlbumRipper {
             }
         }
     }
 
     private class Pair {
         public String first, second;
         public Pair(String first, String second) {

@@ -45,7 +45,7 @@ public class PhotobucketRipper extends AlbumRipper {
             return url;
         }
     }
 
     public String getAlbumTitle(URL url) throws MalformedURLException {
         try {
             // Attempt to use album title as GID
@@ -165,7 +165,7 @@ public class PhotobucketRipper extends AlbumRipper {
                 JSONObject object = objects.getJSONObject(i);
                 String image = object.getString("fullsizeUrl");
                 filesIndex += 1;
                 addURLToDownload(new URL(image),
                         "",
                         object.getString("location").replaceAll(" ", "_"),
                         albumDoc.location(),
@@ -179,7 +179,7 @@ public class PhotobucketRipper extends AlbumRipper {
             return new ArrayList<String>();
         }
     }
 
     private List<String> getSubAlbums(String url, String currentAlbumPath) {
         List<String> result = new ArrayList<String>();
         String subdomain = url.substring(url.indexOf("://")+3);
@@ -193,7 +193,7 @@ public class PhotobucketRipper extends AlbumRipper {
             JSONObject json = Http.url(apiUrl).getJSON();
             JSONArray subalbums = json.getJSONObject("body").getJSONArray("subAlbums");
             for (int i = 0; i < subalbums.length(); i++) {
                 String suburl =
                         "http://"
                         + subdomain
                         + ".photobucket.com"

@@ -31,7 +31,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
     public String getHost() {
         return "sankakucomplex";
     }
 
     @Override
     public String getDomain() {
         return "sankakucomplex.com";
@@ -52,7 +52,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
                 "idol.sankakucomplex.com?...&tags=something... - got " +
                 url + "instead");
     }
 
     @Override
     public Document getFirstPage() throws IOException {
         if (albumDoc == null) {
@@ -62,7 +62,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
         }
         return albumDoc;
     }
 
     @Override
     public List<String> getURLsFromPage(Document doc) {
         List<String> imageURLs = new ArrayList<String>();
@@ -78,7 +78,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
         }
         return imageURLs;
     }
 
     @Override
     public void downloadURL(URL url, int index) {
         // Mock up the URL of the post page based on the post ID at the end of the URL.
@@ -91,7 +91,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
         Element pagination = doc.select("div.pagination").first();
         if (pagination.hasAttr("next-page-url")) {
             return Http.url(pagination.attr("abs:next-page-url")).cookies(cookies).get();
-        } else{
+        } else {
             return null;
         }
     }

@@ -34,7 +34,7 @@ public class TeenplanetRipper extends AlbumRipper {
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return url;
     }
 
     public String getAlbumTitle(URL url) throws MalformedURLException {
         try {
             // Attempt to use album title as GID

@@ -160,7 +160,7 @@ public class VkRipper extends AlbumRipper {
         }
         waitForThreads();
     }
 
     private Map<String,String> getPhotoIDsToURLs(String photoID) throws IOException {
         Map<String,String> photoIDsToURLs = new HashMap<String,String>();
         Map<String,String> postData = new HashMap<String,String>();

@@ -30,7 +30,7 @@ public class MotherlessVideoRipper extends VideoRipper {
         Matcher m = p.matcher(url.toExternalForm());
         return m.matches();
     }
 
     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return url;

@@ -31,7 +31,7 @@ public class ViddmeRipper extends VideoRipper {
         Matcher m = p.matcher(url.toExternalForm());
         return m.matches();
     }
 
     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return url;

@@ -16,7 +16,7 @@ public class RipStatusComplete {
         this.dir = dir;
         this.count = count;
     }
 
     public String getDir() {
         String result;
         try {

@@ -31,7 +31,7 @@ public class AES {
         nBits = nBits / 8;
         byte[] data = Base64.decode(cipherText);
         byte[] k = Arrays.copyOf(key.getBytes(), nBits);
 
         Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
         SecretKey secretKey = generateSecretKey(k, nBits);
         byte[] nonceBytes = Arrays.copyOf(Arrays.copyOf(data, 8), nBits / 2);
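For orientation, a decrypt routine set up this way presumably finishes along these lines; the init/doFinal tail is an assumption, since the hunk cuts off here:

    // Assumed continuation: CTR mode takes the nonce-derived bytes as the IV,
    // and everything after the 8-byte nonce in `data` is the ciphertext.
    IvParameterSpec iv = new IvParameterSpec(nonceBytes);   // javax.crypto.spec
    cipher.init(Cipher.DECRYPT_MODE, secretKey, iv);
    byte[] plainText = cipher.doFinal(data, 8, data.length - 8);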

@@ -17,7 +17,7 @@ import com.rarchives.ripme.ripper.AbstractRipper;
 /**
  * Wrapper around the Jsoup connection methods.
  *
  * Benefit is retry logic.
  */
 public class Http {
@@ -38,7 +38,7 @@ public class Http {
         this.url = url.toExternalForm();
         defaultSettings();
     }
 
     public static Http url(String url) {
         return new Http(url);
     }
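Pulling together the calls that appear throughout this commit, the wrapper's fluent style reads roughly like this (the URL is illustrative; `cookies` is a `Map<String,String>` from an earlier response):

    // Hedged usage sketch of the Http wrapper: each chained setter returns
    // the Http instance, and get()/getJSON() perform the request with the
    // class's retry logic.
    Document doc = Http.url("http://example.com/gallery")
                       .ignoreContentType()
                       .cookies(cookies)
                       .get();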

@@ -8,7 +8,7 @@ import java.util.Map;
 
 import com.rarchives.ripme.ripper.rippers.ImagefapRipper;
 
 public class ImagefapRipperTest extends RippersTest {
     public void testImagefapAlbums() throws IOException {
         Map<URL, String> testURLs = new HashMap<URL, String>();
         // Album with specific title