Spacing around keywords and braces

MetaPrime 2017-05-09 17:50:32 -07:00
parent eb979cc1f6
commit 7268a10ef3
7 changed files with 21 additions and 24 deletions
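The convention applied throughout the diff below is the common Java brace style: a single space before an opening brace and, for control-flow statements such as while, for, and if, a space between the keyword and its opening parenthesis. A minimal, self-contained sketch of the before/after shape (the class and values here are illustrative placeholders, not code from this repository):

import java.util.Arrays;
import java.util.List;

public class BraceSpacingExample {
    public static void main(String[] args) {
        List<String> urls = Arrays.asList("http://example.com/a", "http://example.com/b");
        // Before this commit the equivalent code read: for(String url : urls){
        // After, the keyword, parenthesis, and brace are separated by single spaces:
        for (String url : urls) {
            if (!url.isEmpty()) {
                System.out.println(url);
            }
        }
    }
}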

App.java

@@ -146,7 +146,7 @@ public class App {
         try {
             String url;
             BufferedReader br = new BufferedReader(new FileReader(filename));
-            while((url = br.readLine()) != null) {
+            while ((url = br.readLine()) != null) {
                 // loop through each url in the file and proces each url individually.
                 ripURL(url.trim(), cl.hasOption("n"));
             }

ChanRipper.java

@@ -158,13 +158,13 @@ public class ChanRipper extends AbstractHTMLRipper {
             Boolean self_hosted = false;
             if (!generalChanSite) {
                 for (String cdnDomain : chanSite.cdnDomains) {
-                    if (href.contains(cdnDomain)){
+                    if (href.contains(cdnDomain)) {
                         self_hosted = true;
                     }
                 }
             }

-            if (self_hosted || generalChanSite){
+            if (self_hosted || generalChanSite) {
                 p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm)$", Pattern.CASE_INSENSITIVE);
                 m = p.matcher(href);
                 if (m.matches()) {
@@ -194,7 +194,7 @@ public class ChanRipper extends AbstractHTMLRipper {
                 }
                 List<URL> urls = RipUtils.getFilesFromURL(originalURL);
-                for(URL imageurl : urls){
+                for (URL imageurl : urls) {
                     imageURLs.add(imageurl.toString());
                 }
             }

EroShareRipper.java

@@ -9,7 +9,6 @@ import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -18,10 +17,8 @@ import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
-import org.jsoup.Connection.Method;

 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
 import com.rarchives.ripme.utils.Http;

 /**
@@ -45,7 +42,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
     }

     @Override
-    public void downloadURL(URL url, int index){
+    public void downloadURL(URL url, int index) {
         addURLToDownload(url);
     }
@@ -66,12 +63,12 @@ public class EroShareRipper extends AbstractHTMLRipper {
     @Override
-    public List<String> getURLsFromPage(Document doc){
+    public List<String> getURLsFromPage(Document doc) {
         List<String> URLs = new ArrayList<String>();
         //Pictures
         Elements imgs = doc.getElementsByTag("img");
-        for (Element img : imgs){
-            if (img.hasClass("album-image")){
+        for (Element img : imgs) {
+            if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
                 imageURL = "https:" + imageURL;
                 URLs.add(imageURL);
@@ -79,8 +76,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         }
         //Videos
         Elements vids = doc.getElementsByTag("video");
-        for (Element vid : vids){
-            if (vid.hasClass("album-video")){
+        for (Element vid : vids) {
+            if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
                 URLs.add(videoURL);
@@ -122,8 +119,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         List<URL> URLs = new ArrayList<URL>();
         //Pictures
         Elements imgs = doc.getElementsByTag("img");
-        for (Element img : imgs){
-            if (img.hasClass("album-image")){
+        for (Element img : imgs) {
+            if (img.hasClass("album-image")) {
                 String imageURL = img.attr("src");
                 imageURL = "https:" + imageURL;
                 URLs.add(new URL(imageURL));
@@ -131,8 +128,8 @@ public class EroShareRipper extends AbstractHTMLRipper {
         }
         //Videos
         Elements vids = doc.getElementsByTag("video");
-        for (Element vid : vids){
-            if (vid.hasClass("album-video")){
+        for (Element vid : vids) {
+            if (vid.hasClass("album-video")) {
                 Elements source = vid.getElementsByTag("source");
                 String videoURL = source.first().attr("src");
                 URLs.add(new URL(videoURL));

MyhentaicomicsRipper.java

@@ -86,13 +86,13 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
         List<String> albumPagesList = new ArrayList<String>();
         int pageNumber = 1;
         albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
-        while(true) {
+        while (true) {
             String urlToGet = "http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber);
             Document nextAlbumPage;
             try {
                 logger.info("Grabbing " + urlToGet);
                 nextAlbumPage = Http.url(urlToGet).get();
-            } catch(IOException e){
+            } catch(IOException e) {
                 logger.warn("Failed to log link in Jsoup");
                 nextAlbumPage = null;
                 e.printStackTrace();
@@ -100,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
             Element elem = nextAlbumPage.select("a.ui-icon-right").first();
             String nextPage = elem.attr("href");
             pageNumber = pageNumber + 1;
-            if (nextPage == ""){
+            if (nextPage == "") {
                 logger.info("Got " + pageNumber + " pages");
                 break;
             }
@@ -136,7 +136,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
                 else {
                     album_doc = Http.url(element).get();
                 }
-            } catch(IOException e){
+            } catch(IOException e) {
                 logger.warn("Failed to log link in Jsoup");
                 album_doc = null;
                 e.printStackTrace();

RedditRipper.java

@@ -132,7 +132,7 @@ public class RedditRipper extends AlbumRipper {
         JSONArray jsonArray = new JSONArray();
         if (jsonObj instanceof JSONObject) {
             jsonArray.put( (JSONObject) jsonObj);
-        } else if (jsonObj instanceof JSONArray){
+        } else if (jsonObj instanceof JSONArray) {
            jsonArray = (JSONArray) jsonObj;
         } else {
             logger.warn("[!] Unable to parse JSON: " + jsonString);

RipUtils.java

@@ -118,7 +118,7 @@ public class RipUtils {
        }

        if (url.getHost().equals("imgur.com") ||
-               url.getHost().equals("m.imgur.com")){
+               url.getHost().equals("m.imgur.com")) {
            try {
                // Fetch the page
                Document doc = Jsoup.connect(url.toExternalForm())

Utils.java

@@ -251,7 +251,7 @@ public class Utils {
            jarPath = URLDecoder.decode(jarPath, "UTF-8");
            JarFile jarFile = new JarFile(jarPath);
            Enumeration<JarEntry> entries = jarFile.entries();
-           while(entries.hasMoreElements()) {
+           while (entries.hasMoreElements()) {
                JarEntry nextElement = entries.nextElement();
                String entryName = nextElement.getName();
                if (entryName.startsWith(relPath)