Spacing around keywords and braces

This commit is contained in:
MetaPrime 2017-05-09 17:50:32 -07:00
parent eb979cc1f6
commit 7268a10ef3
7 changed files with 21 additions and 24 deletions

View File

@@ -146,7 +146,7 @@ public class App {
try {
String url;
BufferedReader br = new BufferedReader(new FileReader(filename));
while((url = br.readLine()) != null) {
while ((url = br.readLine()) != null) {
// loop through each url in the file and proces each url individually.
ripURL(url.trim(), cl.hasOption("n"));
}

View File

@@ -158,13 +158,13 @@ public class ChanRipper extends AbstractHTMLRipper {
Boolean self_hosted = false;
if (!generalChanSite) {
for (String cdnDomain : chanSite.cdnDomains) {
if (href.contains(cdnDomain)){
if (href.contains(cdnDomain)) {
self_hosted = true;
}
}
}
if (self_hosted || generalChanSite){
if (self_hosted || generalChanSite) {
p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
m = p.matcher(href);
if (m.matches()) {
@@ -194,7 +194,7 @@ public class ChanRipper extends AbstractHTMLRipper {
}
List<URL> urls = RipUtils.getFilesFromURL(originalURL);
for(URL imageurl : urls){
for (URL imageurl : urls) {
imageURLs.add(imageurl.toString());
}
}

View File

@@ -9,7 +9,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -18,10 +17,8 @@ import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.jsoup.Connection.Method;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
/**
@@ -45,7 +42,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
@Override
public void downloadURL(URL url, int index){
public void downloadURL(URL url, int index) {
addURLToDownload(url);
}
@@ -66,12 +63,12 @@ public class EroShareRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc){
public List<String> getURLsFromPage(Document doc) {
List<String> URLs = new ArrayList<String>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs){
if (img.hasClass("album-image")){
for (Element img : imgs) {
if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(imageURL);
@@ -79,8 +76,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
for (Element vid : vids){
if (vid.hasClass("album-video")){
for (Element vid : vids) {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(videoURL);
@@ -122,8 +119,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
List<URL> URLs = new ArrayList<URL>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs){
if (img.hasClass("album-image")){
for (Element img : imgs) {
if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(new URL(imageURL));
@@ -131,8 +128,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
}
//Videos
Elements vids = doc.getElementsByTag("video");
for (Element vid : vids){
if (vid.hasClass("album-video")){
for (Element vid : vids) {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(new URL(videoURL));

View File

@@ -86,13 +86,13 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
List<String> albumPagesList = new ArrayList<String>();
int pageNumber = 1;
albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
while(true) {
while (true) {
String urlToGet = "http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber);
Document nextAlbumPage;
try {
logger.info("Grabbing " + urlToGet);
nextAlbumPage = Http.url(urlToGet).get();
} catch(IOException e){
} catch(IOException e) {
logger.warn("Failed to log link in Jsoup");
nextAlbumPage = null;
e.printStackTrace();
@@ -100,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Element elem = nextAlbumPage.select("a.ui-icon-right").first();
String nextPage = elem.attr("href");
pageNumber = pageNumber + 1;
if (nextPage == ""){
if (nextPage == "") {
logger.info("Got " + pageNumber + " pages");
break;
}
@@ -136,7 +136,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
else {
album_doc = Http.url(element).get();
}
} catch(IOException e){
} catch(IOException e) {
logger.warn("Failed to log link in Jsoup");
album_doc = null;
e.printStackTrace();

View File

@@ -132,7 +132,7 @@ public class RedditRipper extends AlbumRipper {
JSONArray jsonArray = new JSONArray();
if (jsonObj instanceof JSONObject) {
jsonArray.put( (JSONObject) jsonObj);
} else if (jsonObj instanceof JSONArray){
} else if (jsonObj instanceof JSONArray) {
jsonArray = (JSONArray) jsonObj;
} else {
logger.warn("[!] Unable to parse JSON: " + jsonString);

View File

@@ -118,7 +118,7 @@ public class RipUtils {
}
if (url.getHost().equals("imgur.com") ||
url.getHost().equals("m.imgur.com")){
url.getHost().equals("m.imgur.com")) {
try {
// Fetch the page
Document doc = Jsoup.connect(url.toExternalForm())

View File

@@ -251,7 +251,7 @@ public class Utils {
jarPath = URLDecoder.decode(jarPath, "UTF-8");
JarFile jarFile = new JarFile(jarPath);
Enumeration<JarEntry> entries = jarFile.entries();
while(entries.hasMoreElements()) {
while (entries.hasMoreElements()) {
JarEntry nextElement = entries.nextElement();
String entryName = nextElement.getName();
if (entryName.startsWith(relPath)