Fix style

MetaPrime 2017-06-19 10:32:57 -07:00
parent 153b206260
commit 71694196b9
23 changed files with 166 additions and 166 deletions

View File

@@ -40,7 +40,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
public boolean canRip(URL url) {
return url.getHost().endsWith(getDomain());
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;

View File

@@ -92,7 +92,7 @@ public abstract class AlbumRipper extends AbstractRipper {
* Uses filename from URL to decide filename.
* @param url
* URL to download
-* @return
+* @return
* True on success
*/
public boolean addURLToDownload(URL url) {
@@ -139,7 +139,7 @@ public abstract class AlbumRipper extends AbstractRipper {
itemsPending.remove(url);
itemsCompleted.put(url, file);
observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file.getAbsolutePath()));
checkIfComplete();
}
@@ -160,8 +160,8 @@ public abstract class AlbumRipper extends AbstractRipper {
* Sets directory to save all ripped files to.
* @param url
* URL to define how the working directory should be saved.
-* @throws
-* IOException
+* @throws
+* IOException
*/
@Override
public void setWorkingDir(URL url) throws IOException {

View File

@@ -135,7 +135,7 @@ public class DownloadVideoThread extends Thread {
observer.downloadCompleted(url, saveAs);
logger.info("[+] Saved " + url + " as " + this.prettySaveAs);
}
private int getTotalBytes(URL url) throws IOException {
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("HEAD");

View File

@@ -109,20 +109,20 @@ public class CheebyRipper extends AbstractHTMLRipper {
}
return imageURLs;
}
@Override
public void rip() throws IOException {
logger.info("Retrieving " + this.url);
sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
Document doc = getFirstPage();
while (doc != null) {
List<Image> images = getImagesFromPage(doc);
if (images.size() == 0) {
throw new IOException("No images found at " + doc.location());
}
for (Image image : images) {
if (isStopped()) {
break;
@@ -167,7 +167,7 @@ public class CheebyRipper extends AbstractHTMLRipper {
}
}
}
private class Image {
String url, prefix;
int index;

View File

@@ -46,7 +46,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
@Override
-public void downloadURL(URL url, int index){
+public void downloadURL(URL url, int index) {
addURLToDownload(url);
}
@Override
@@ -107,12 +107,12 @@ public class EroShareRipper extends AbstractHTMLRipper {
@Override
-public List<String> getURLsFromPage(Document doc){
+public List<String> getURLsFromPage(Document doc) {
List<String> URLs = new ArrayList<String>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
-for (Element img : imgs){
-if (img.hasClass("album-image")){
+for (Element img : imgs) {
+if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(imageURL);
@@ -120,8 +120,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
-for (Element vid : vids){
-if (vid.hasClass("album-video")){
+for (Element vid : vids) {
+if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(videoURL);
@@ -129,18 +129,18 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
// Profile videos
Elements links = doc.select("div.item-container > a.item");
-for (Element link : links){
+for (Element link : links) {
Document video_page;
try {
video_page = Http.url("https://eroshare.com" + link.attr("href")).get();
-} catch(IOException e) {
+} catch (IOException e) {
logger.warn("Failed to log link in Jsoup");
video_page = null;
e.printStackTrace();
}
Elements profile_vids = video_page.getElementsByTag("video");
-for (Element vid : profile_vids){
-if (vid.hasClass("album-video")){
+for (Element vid : profile_vids) {
+if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(videoURL);
@@ -190,8 +190,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
List<URL> URLs = new ArrayList<URL>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
-for (Element img : imgs){
-if (img.hasClass("album-image")){
+for (Element img : imgs) {
+if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(new URL(imageURL));
@@ -199,8 +199,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
-for (Element vid : vids){
-if (vid.hasClass("album-video")){
+for (Element vid : vids) {
+if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(new URL(videoURL));

View File

@@ -1,101 +1,101 @@
package com.rarchives.ripme.ripper.rippers;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class FapprovedRipper extends AbstractHTMLRipper {
private int pageIndex = 1;
private String username = null;
public FapprovedRipper(URL url) throws IOException {
super(url);
}
@Override
public String getHost() {
return "fapproved";
}
@Override
public String getDomain() {
return "fapproved.com";
}
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://[w.]*fapproved.com/users/([a-zA-Z0-9\\-_]{3,}).*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
username = m.group(1);
return username;
}
throw new MalformedURLException("Fapproved user not found in " + url + ", expected http://fapproved.com/users/username/images");
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return new URL("http://fapproved.com/users/" + getGID(url));
}
@Override
public Document getFirstPage() throws IOException {
pageIndex = 1;
String pageURL = getPageURL(pageIndex);
return Http.url(pageURL)
.ignoreContentType()
.get();
}
@Override
public Document getNextPage(Document doc) throws IOException {
if ((doc.select("div.pagination li.next.disabled").size() != 0)
|| (doc.select("div.pagination").size() == 0)) {
throw new IOException("No more pages found");
}
sleep(1000);
pageIndex++;
String pageURL = getPageURL(pageIndex);
return Http.url(pageURL)
.ignoreContentType()
.get();
}
private String getPageURL(int index) throws IOException {
if (username == null) {
username = getGID(this.url);
}
return "http://fapproved.com/users/" + username + "/images?page=" + pageIndex;
}
@Override
public List<String> getURLsFromPage(Document page) {
List<String> imageURLs = new ArrayList<String>();
for (Element image : page.select("div.actual-image img")) {
String imageURL = image.attr("src");
if (imageURL.startsWith("//")) {
imageURL = "http:" + imageURL;
}
else if (imageURL.startsWith("/")) {
imageURL = "http://fapproved.com" + imageURL;
}
imageURLs.add(imageURL);
}
return imageURLs;
}
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}

View File

@@ -63,7 +63,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
}
return new URL(sUrl);
}
public String getAlbumTitle(URL url) throws MalformedURLException {
if (!url.toExternalForm().contains("/sets/")) {
return super.getAlbumTitle(url);
@@ -90,7 +90,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
// Root: https://www.flickr.com/photos/115858035@N04/
// Album: https://www.flickr.com/photos/115858035@N04/sets/72157644042355643/
final String domainRegex = "https?://[wm.]*flickr.com";
final String userRegex = "[a-zA-Z0-9@]+";
// Album
@@ -159,7 +159,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
}
return Http.url(nextURL).get();
}
@Override
public List<String> getURLsFromPage(Document page) {
List<String> imageURLs = new ArrayList<String>();
@@ -194,7 +194,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
}
return imageURLs;
}
@Override
public void downloadURL(URL url, int index) {
// Add image page to threadpool to grab the image & download it
@@ -267,7 +267,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
logger.error("[!] Exception while loading/parsing " + this.url, e);
}
}
private Document getLargestImagePageDocument(URL url) throws IOException {
// Get current page
Document doc = Http.url(url).get();

View File

@@ -51,7 +51,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
public URL sanitizeURL(URL url) throws MalformedURLException {
return new URL("http://gifyo.com/" + getGID(url) + "/");
}
@Override
public Document getFirstPage() throws IOException {
Response resp = Http.url(this.url)
@@ -66,7 +66,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
}
return doc;
}
@Override
public Document getNextPage(Document doc) throws IOException {
page++;
@@ -89,7 +89,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
sleep(2000);
return nextDoc;
}
@Override
public List<String> getURLsFromPage(Document doc) {
List<String> imageURLs = new ArrayList<String>();
@@ -105,7 +105,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
logger.debug("Found " + imageURLs.size() + " images");
return imageURLs;
}
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url);

View File

@@ -43,7 +43,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
+ "imagestash.org/tag/tagname"
+ " Got: " + url);
}
@Override
public JSONObject getFirstPage() throws IOException {
String baseURL = "https://imagestash.org/images?tags="
@@ -51,7 +51,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
+ "&page=" + page;
return Http.url(baseURL).getJSON();
}
@Override
public JSONObject getNextPage(JSONObject json) throws IOException {
int count = json.getInt("count"),
@@ -64,7 +64,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
page++;
return getFirstPage();
}
@Override
public List<String> getURLsFromJSON(JSONObject json) {
List<String> imageURLs = new ArrayList<String>();
@@ -79,7 +79,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
}
return imageURLs;
}
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));

View File

@@ -121,7 +121,7 @@ public class InstagramRipper extends AbstractJSONRipper {
imageURL = imageURL.replaceAll("\\?ig_cache_key.+$", "");
return imageURL;
}
private String getMedia(JSONObject data) {
String imageURL = "";
if (data.has("videos")) {
@@ -131,14 +131,14 @@ public class InstagramRipper extends AbstractJSONRipper {
}
return imageURL;
}
@Override
public List<String> getURLsFromJSON(JSONObject json) {
List<String> imageURLs = new ArrayList<String>();
JSONArray datas = json.getJSONArray("items");
for (int i = 0; i < datas.length(); i++) {
JSONObject data = (JSONObject) datas.get(i);
String dataType = data.getString("type");
if (dataType.equals("carousel")) {
JSONArray carouselMedias = data.getJSONArray("carousel_media");

View File

@@ -55,7 +55,7 @@ public class ModelmayhemRipper extends AlbumRipper {
.method(Method.GET)
.execute();
cookies = resp.cookies();
resp = Jsoup.connect("http://www.modelmayhem.com/includes/js/auth.php")
.cookies(cookies)
.ignoreContentType(true)

View File

@@ -94,7 +94,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
try {
logger.info("Grabbing " + urlToGet);
nextAlbumPage = Http.url(urlToGet).get();
-} catch(IOException e) {
+} catch (IOException e) {
logger.warn("Failed to log link in Jsoup");
nextAlbumPage = null;
e.printStackTrace();
@@ -122,7 +122,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Document doc;
try {
doc = Http.url("http://myhentaicomics.com" + url).get();
-} catch(IOException e){
+} catch (IOException e) {
logger.warn("Failed to log link in Jsoup");
doc = null;
e.printStackTrace();
@@ -144,7 +144,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
else {
album_doc = Http.url(element).get();
}
-} catch(IOException e){
+} catch (IOException e) {
logger.warn("Failed to log link in Jsoup");
album_doc = null;
e.printStackTrace();
@@ -168,7 +168,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
result.add("http://myhentaicomics.com/" + imageSource);
addURLToDownload(new URL("http://myhentaicomics.com/" + imageSource), "", url_string.split("/")[6]);
}
-catch(MalformedURLException e) {
+catch (MalformedURLException e) {
logger.warn("Malformed URL");
e.printStackTrace();
}

View File

@@ -41,7 +41,7 @@ public class NfsfwRipper extends AlbumRipper {
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}
@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
@@ -185,7 +185,7 @@ public class NfsfwRipper extends AlbumRipper {
}
}
}
private class Pair {
public String first, second;
public Pair(String first, String second) {

View File

@@ -45,7 +45,7 @@ public class PhotobucketRipper extends AlbumRipper {
return url;
}
}
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
// Attempt to use album title as GID
@@ -165,7 +165,7 @@ public class PhotobucketRipper extends AlbumRipper {
JSONObject object = objects.getJSONObject(i);
String image = object.getString("fullsizeUrl");
filesIndex += 1;
-addURLToDownload(new URL(image),
+addURLToDownload(new URL(image),
"",
object.getString("location").replaceAll(" ", "_"),
albumDoc.location(),
@@ -179,7 +179,7 @@ public class PhotobucketRipper extends AlbumRipper {
return new ArrayList<String>();
}
}
private List<String> getSubAlbums(String url, String currentAlbumPath) {
List<String> result = new ArrayList<String>();
String subdomain = url.substring(url.indexOf("://")+3);
@@ -193,7 +193,7 @@ public class PhotobucketRipper extends AlbumRipper {
JSONObject json = Http.url(apiUrl).getJSON();
JSONArray subalbums = json.getJSONObject("body").getJSONArray("subAlbums");
for (int i = 0; i < subalbums.length(); i++) {
-String suburl =
+String suburl =
"http://"
+ subdomain
+ ".photobucket.com"

View File

@@ -31,7 +31,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
public String getHost() {
return "sankakucomplex";
}
@Override
public String getDomain() {
return "sankakucomplex.com";
@@ -52,7 +52,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
"idol.sankakucomplex.com?...&tags=something... - got " +
url + "instead");
}
@Override
public Document getFirstPage() throws IOException {
if (albumDoc == null) {
@@ -62,7 +62,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
}
return albumDoc;
}
@Override
public List<String> getURLsFromPage(Document doc) {
List<String> imageURLs = new ArrayList<String>();
@@ -78,7 +78,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
}
return imageURLs;
}
@Override
public void downloadURL(URL url, int index) {
// Mock up the URL of the post page based on the post ID at the end of the URL.
@@ -91,7 +91,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
Element pagination = doc.select("div.pagination").first();
if (pagination.hasAttr("next-page-url")) {
return Http.url(pagination.attr("abs:next-page-url")).cookies(cookies).get();
-} else{
+} else {
return null;
}
}

View File

@@ -34,7 +34,7 @@ public class TeenplanetRipper extends AlbumRipper {
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
// Attempt to use album title as GID

View File

@@ -160,7 +160,7 @@ public class VkRipper extends AlbumRipper {
}
waitForThreads();
}
private Map<String,String> getPhotoIDsToURLs(String photoID) throws IOException {
Map<String,String> photoIDsToURLs = new HashMap<String,String>();
Map<String,String> postData = new HashMap<String,String>();

View File

@@ -30,7 +30,7 @@ public class MotherlessVideoRipper extends VideoRipper {
Matcher m = p.matcher(url.toExternalForm());
return m.matches();
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;

View File

@@ -31,7 +31,7 @@ public class ViddmeRipper extends VideoRipper {
Matcher m = p.matcher(url.toExternalForm());
return m.matches();
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;

View File

@@ -16,7 +16,7 @@ public class RipStatusComplete {
this.dir = dir;
this.count = count;
}
public String getDir() {
String result;
try {

View File

@@ -31,7 +31,7 @@ public class AES {
nBits = nBits / 8;
byte[] data = Base64.decode(cipherText);
byte[] k = Arrays.copyOf(key.getBytes(), nBits);
Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
SecretKey secretKey = generateSecretKey(k, nBits);
byte[] nonceBytes = Arrays.copyOf(Arrays.copyOf(data, 8), nBits / 2);

View File

@@ -17,7 +17,7 @@ import com.rarchives.ripme.ripper.AbstractRipper;
/**
* Wrapper around the Jsoup connection methods.
-*
+*
* Benefit is retry logic.
*/
public class Http {
@@ -38,7 +38,7 @@ public class Http {
this.url = url.toExternalForm();
defaultSettings();
}
public static Http url(String url) {
return new Http(url);
}

View File

@@ -8,7 +8,7 @@ import java.util.Map;
import com.rarchives.ripme.ripper.rippers.ImagefapRipper;
public class ImagefapRipperTest extends RippersTest {
public void testImagefapAlbums() throws IOException {
Map<URL, String> testURLs = new HashMap<URL, String>();
// Album with specific title