Replaced use of Collection.size() with Collection.isEmpty(), which makes the code more readable and may improve performance
parent d58a80b5e6
commit fa8f7b5c00
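The same mechanical pattern is applied in every hunk below: an emptiness check written as a size comparison is rewritten as a call to Collection.isEmpty(). A minimal before/after sketch for reference (the IsEmptyExample class and the imageURLs list are illustrative only, not part of the RipMe sources):

import java.util.ArrayList;
import java.util.List;

public class IsEmptyExample {
    public static void main(String[] args) {
        List<String> imageURLs = new ArrayList<>();

        // Before: emptiness expressed as a size comparison.
        if (imageURLs.size() == 0) {
            System.out.println("empty (size() == 0)");
        }

        // After: intent stated directly; for collections that do not store
        // their element count, isEmpty() can also avoid counting elements.
        if (imageURLs.isEmpty()) {
            System.out.println("empty (isEmpty())");
        }
    }
}

For java.util collections such as ArrayList the two forms perform identically; any gain applies only to collections whose size() is not O(1), which is why the commit message says "may" improve performance.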
@@ -330,7 +330,7 @@ public class App {
 } else {
 logger.info("Loading history from configuration");
 HISTORY.fromList(Utils.getConfigList("download.history"));
-if (HISTORY.toList().size() == 0) {
+if (HISTORY.toList().isEmpty()) {
 // Loaded from config, still no entries.
 // Guess rip history based on rip folder
 String[] dirs = Utils.getWorkingDirectory().list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory());
@@ -111,7 +111,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
 }
 }
 
-if (imageURLs.size() == 0) {
+if (imageURLs.isEmpty()) {
 throw new IOException("No images found at " + doc.location());
 }
 
@@ -127,7 +127,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
 if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) {
 logger.debug("Fetching description(s) from " + doc.location());
 List<String> textURLs = getDescriptionsFromPage(doc);
-if (textURLs.size() > 0) {
+if (!textURLs.isEmpty()) {
 logger.debug("Found description link(s) from " + doc.location());
 for (String textURL : textURLs) {
 if (isStopped()) {
@@ -63,7 +63,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
 }
 }
 
-if (imageURLs.size() == 0) {
+if (imageURLs.isEmpty()) {
 throw new IOException("No images found at " + this.url);
 }
 
@@ -56,7 +56,7 @@ public class BcfakesRipper extends AbstractHTMLRipper {
 public Document getNextPage(Document doc) throws IOException {
 // Find next page
 Elements hrefs = doc.select("a.next");
-if (hrefs.size() == 0) {
+if (hrefs.isEmpty()) {
 throw new IOException("No more pages");
 }
 String nextUrl = "http://www.bcfakes.com" + hrefs.first().attr("href");
@@ -241,8 +241,8 @@ public class DeviantartRipper extends AbstractHTMLRipper {
 return null;
 }
 Elements nextButtons = page.select("link[rel=\"next\"]");
-if (nextButtons.size() == 0) {
-if (page.select("link[rel=\"prev\"]").size() == 0) {
+if (nextButtons.isEmpty()) {
+if (page.select("link[rel=\"prev\"]").isEmpty()) {
 throw new IOException("No next page found");
 } else {
 throw new IOException("Hit end of pages");
@@ -376,7 +376,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
 Elements els = doc.select("img.dev-content-full");
 String fsimage = null;
 // Get the largest resolution image on the page
-if (els.size() > 0) {
+if (!els.isEmpty()) {
 // Large image
 fsimage = els.get(0).attr("src");
 logger.info("Found large-scale: " + fsimage);
@@ -386,7 +386,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
 }
 // Try to find the download button
 els = doc.select("a.dev-page-download");
-if (els.size() > 0) {
+if (!els.isEmpty()) {
 // Full-size image
 String downloadLink = els.get(0).attr("href");
 logger.info("Found download button link: " + downloadLink);
@@ -60,7 +60,7 @@ public class DrawcrowdRipper extends AbstractHTMLRipper {
 @Override
 public Document getNextPage(Document doc) throws IOException {
 Elements loadMore = doc.select("a#load-more");
-if (loadMore.size() == 0) {
+if (loadMore.isEmpty()) {
 throw new IOException("No next page found");
 }
 if (!sleep(1000)) {
@@ -49,7 +49,7 @@ public class DribbbleRipper extends AbstractHTMLRipper {
 public Document getNextPage(Document doc) throws IOException {
 // Find next page
 Elements hrefs = doc.select("a.next_page");
-if (hrefs.size() == 0) {
+if (hrefs.isEmpty()) {
 throw new IOException("No more pages");
 }
 String nextUrl = "https://www.dribbble.com" + hrefs.first().attr("href");
@@ -186,7 +186,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
 }
 // Find next page
 Elements hrefs = doc.select(".ptt a");
-if (hrefs.size() == 0) {
+if (hrefs.isEmpty()) {
 logger.info("doc: " + doc.html());
 throw new IOException("No navigation links found");
 }
@@ -255,10 +255,10 @@ public class EHentaiRipper extends AbstractHTMLRipper {
 
 // Find image
 Elements images = doc.select(".sni > a > img");
-if (images.size() == 0) {
+if (images.isEmpty()) {
 // Attempt to find image elsewise (Issue #41)
 images = doc.select("img#img");
-if (images.size() == 0) {
+if (images.isEmpty()) {
 logger.warn("Image not found at " + this.url);
 return;
 }
@@ -276,7 +276,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
 catch (IOException e) {
 logger.error("Error fetching full-size image from " + rawUrl, e);
 }
-if (images.size() > 0) {
+if (!images.isEmpty()) {
 imageURL = images.first().attr("src");
 logger.debug("Found full-size non-watermarked image: " + imageURL);
 }
@@ -250,7 +250,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
 try {
 Document doc = getLargestImagePageDocument(this.url);
 Elements fullsizeImages = doc.select("div#allsizes-photo img");
-if (fullsizeImages.size() == 0) {
+if (fullsizeImages.isEmpty()) {
 logger.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
 }
 else {
@@ -274,7 +274,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
 String largestImagePage = this.url.toExternalForm();
 for (Element olSize : doc.select("ol.sizes-list > li > ol > li")) {
 Elements ola = olSize.select("a");
-if (ola.size() == 0) {
+if (ola.isEmpty()) {
 largestImagePage = this.url.toExternalForm();
 }
 else {
@@ -68,7 +68,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
 public Document getNextPage(Document doc) throws IOException {
 // Find next page
 Elements nextPageUrl = doc.select("a.right");
-if (nextPageUrl.size() == 0) {
+if (nextPageUrl.isEmpty()) {
 throw new IOException("No more pages");
 }
 String nextUrl = urlBase + nextPageUrl.first().attr("href");
@@ -121,7 +121,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
 
 // Try to find the description
 Elements els = resp.parse().select("td[class=alt1][width=\"70%\"]");
-if (els.size() == 0) {
+if (els.isEmpty()) {
 logger.debug("No description at " + page);
 throw new IOException("No description found");
 }
@@ -111,7 +111,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
 
 @Override
 public Document getNextPage(Document doc) throws IOException {
-if (doc.select("li.next.hidden").size() != 0) {
+if (!doc.select("li.next.hidden").isEmpty()) {
 // Last page
 throw new IOException("No more pages");
 }
@@ -101,7 +101,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
 .get();
 // Find image
 Elements images = doc.select("a > img");
-if (images.size() == 0) {
+if (images.isEmpty()) {
 logger.warn("Image not found at " + this.url);
 return;
 }
@@ -304,10 +304,10 @@ public class ImgurRipper extends AlbumRipper {
 ImgurAlbum imgurAlbum = new ImgurAlbum(url);
 for (Element thumb : doc.select("div.image")) {
 String image;
-if (thumb.select("a.zoom").size() > 0) {
+if (!thumb.select("a.zoom").isEmpty()) {
 // Clickably full-size
 image = "http:" + thumb.select("a").attr("href");
-} else if (thumb.select("img").size() > 0) {
+} else if (!thumb.select("img").isEmpty()) {
 image = "http:" + thumb.select("img").attr("src");
 } else {
 // Unable to find image in this div
@@ -449,7 +449,7 @@ public class ImgurRipper extends AlbumRipper {
 URL imageURL = new URL(image);
 addURLToDownload(imageURL);
 }
-if (imgs.size() == 0) {
+if (imgs.isEmpty()) {
 break;
 }
 page++;
@@ -287,7 +287,7 @@ public class InstagramRipper extends AbstractJSONRipper {
 }
 try {
 if (!data.getBoolean("is_video")) {
-if (imageURLs.size() == 0) {
+if (imageURLs.isEmpty()) {
 // We add this one item to the array because either wise
 // the ripper will error out because we returned an empty array
 imageURLs.add(getOriginalUrl(data.getString("display_url")));
@@ -78,7 +78,7 @@ public class NfsfwRipper extends AlbumRipper {
 List<Pair> subAlbums = new ArrayList<>();
 int index = 0;
 subAlbums.add(new Pair(this.url.toExternalForm(), ""));
-while (subAlbums.size() > 0) {
+while (!subAlbums.isEmpty()) {
 if (isStopped()) {
 break;
 }
@@ -167,7 +167,7 @@ public class NfsfwRipper extends AlbumRipper {
 .referrer(this.url)
 .get();
 Elements images = doc.select(".gbBlock img");
-if (images.size() == 0) {
+if (images.isEmpty()) {
 logger.error("Failed to find image at " + this.url);
 return;
 }
@@ -54,7 +54,7 @@ public class PhotobucketRipper extends AlbumRipper {
 }
 Document albumDoc = pageResponse.parse();
 Elements els = albumDoc.select("div.libraryTitle > h1");
-if (els.size() == 0) {
+if (els.isEmpty()) {
 throw new IOException("Could not find libraryTitle at " + url);
 }
 return els.get(0).text();
@@ -92,7 +92,7 @@ public class PhotobucketRipper extends AlbumRipper {
 subsToRip.add(sub);
 }
 
-while (subsToRip.size() > 0 && !isStopped()) {
+while (!subsToRip.isEmpty() && !isStopped()) {
 try {
 Thread.sleep(1000);
 } catch (InterruptedException e) {
@@ -91,7 +91,7 @@ public class PornhubRipper extends AlbumRipper {
 
 // Find thumbnails
 Elements thumbs = albumDoc.select(".photoBlockBox li");
-if (thumbs.size() == 0) {
+if (thumbs.isEmpty()) {
 logger.debug("albumDoc: " + albumDoc);
 logger.debug("No images found at " + nextUrl);
 return;
@@ -228,7 +228,7 @@ public class TwitterRipper extends AlbumRipper {
 int parsedCount = 0;
 for (int i = 0; i < MAX_REQUESTS; i++) {
 List<JSONObject> tweets = getTweets(getApiURL(lastMaxID - 1));
-if (tweets.size() == 0) {
+if (tweets.isEmpty()) {
 logger.info(" No more tweets found.");
 break;
 }
@@ -82,7 +82,7 @@ public class TwodgalleriesRipper extends AbstractHTMLRipper {
 Document nextDoc = Http.url(url)
 .cookies(cookies)
 .get();
-if (nextDoc.select("div.hcaption > img").size() == 0) {
+if (nextDoc.select("div.hcaption > img").isEmpty()) {
 throw new IOException("No more images to retrieve");
 }
 return nextDoc;
@@ -72,7 +72,7 @@ public class GfycatRipper extends VideoRipper {
 
 Document doc = Http.url(url).get();
 Elements videos = doc.select("source#mp4Source");
-if (videos.size() == 0) {
+if (videos.isEmpty()) {
 throw new IOException("Could not find source#mp4source at " + url);
 }
 String vidUrl = videos.first().attr("src");
@@ -58,7 +58,7 @@ public class MotherlessVideoRipper extends VideoRipper {
 logger.error("WTF");
 }
 List<String> vidUrls = Utils.between(html, "__fileurl = '", "';");
-if (vidUrls.size() == 0) {
+if (vidUrls.isEmpty()) {
 throw new IOException("Could not find video URL at " + url);
 }
 String vidUrl = vidUrls.get(0);
@@ -62,7 +62,7 @@ public class TwitchVideoRipper extends VideoRipper {
 String title = doc.title();
 
 Elements script = doc.select("script");
-if (script.size() == 0) {
+if (script.isEmpty()) {
 throw new IOException("Could not find script code at " + url);
 }
 //Regex assumes highest quality source is listed first
@@ -57,7 +57,7 @@ public class VidearnRipper extends VideoRipper {
 logger.info("Retrieving " + this.url);
 Document doc = Http.url(url).get();
 List<String> mp4s = Utils.between(doc.html(), "file:\"", "\"");
-if (mp4s.size() == 0) {
+if (mp4s.isEmpty()) {
 throw new IOException("Could not find files at " + url);
 }
 String vidUrl = mp4s.get(0);
@@ -57,7 +57,7 @@ public class VineRipper extends VideoRipper {
 logger.info(" Retrieving " + this.url.toExternalForm());
 Document doc = Http.url(this.url).get();
 Elements props = doc.select("meta[property=twitter:player:stream]");
-if (props.size() == 0) {
+if (props.isEmpty()) {
 throw new IOException("Could not find meta property 'twitter:player:stream' at " + url);
 }
 String vidUrl = props.get(0).attr("content");
@@ -57,7 +57,7 @@ public class XhamsterRipper extends VideoRipper {
 logger.info("Retrieving " + this.url);
 Document doc = Http.url(url).get();
 Elements videos = doc.select("div.player-container > a");
-if (videos.size() == 0) {
+if (videos.isEmpty()) {
 throw new IOException("Could not find Embed code at " + url);
 }
 String vidUrl = videos.attr("href");
@@ -57,7 +57,7 @@ public class YoupornRipper extends VideoRipper {
 logger.info(" Retrieving " + this.url);
 Document doc = Http.url(this.url).get();
 Elements videos = doc.select("video");
-if (videos.size() == 0) {
+if (videos.isEmpty()) {
 throw new IOException("Could not find Embed code at " + url);
 }
 Element video = videos.get(0);
@@ -65,7 +65,7 @@ public class YuvutuRipper extends VideoRipper {
 throw new IOException("Could not find iframe code at " + url);
 }
 Elements script = doc.select("script");
-if (script.size() == 0) {
+if (script.isEmpty()) {
 throw new IOException("Could not find script code at " + url);
 }
 Pattern p = Pattern.compile("file: \"(.*?)\"");
@@ -987,7 +987,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
 } else {
 logger.info(rb.getString("loading.history.from.configuration"));
 HISTORY.fromList(Utils.getConfigList("download.history"));
-if (HISTORY.toList().size() == 0) {
+if (HISTORY.toList().isEmpty()) {
 // Loaded from config, still no entries.
 // Guess rip history based on rip folder
 String[] dirs = Utils.getWorkingDirectory().list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory());