diff --git a/README.md b/README.md
index 5979aa9f..d93c2964 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
[![Build Status](https://travis-ci.org/4pr0n/ripme.svg?branch=master)](https://travis-ci.org/4pr0n/ripme)
[![Join the chat at https://gitter.im/RipMeApp/Lobby](https://badges.gitter.im/RipMeApp/Lobby.svg)](https://gitter.im/RipMeApp/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-Album ripper for various websites. Runs on your computer. Requires Java 1.6
+Album ripper for various websites. Runs on your computer. Requires Java 8.
![Screenshot](http://i.imgur.com/kWzhsIu.png)
diff --git a/pom.xml b/pom.xml
index 8ec74f5d..a1095c29 100644
--- a/pom.xml
+++ b/pom.xml
@@ -84,8 +84,8 @@
<artifactId>maven-compiler-plugin</artifactId>
<version>3.1</version>
- <source>1.6</source>
- <target>1.6</target>
+ <source>1.8</source>
+ <target>1.8</target>
diff --git a/src/main/java/com/rarchives/ripme/App.java b/src/main/java/com/rarchives/ripme/App.java
index 51a16cef..b03780c8 100644
--- a/src/main/java/com/rarchives/ripme/App.java
+++ b/src/main/java/com/rarchives/ripme/App.java
@@ -62,13 +62,13 @@ public class App {
}
}
- public static void rip(URL url) throws Exception {
+ private static void rip(URL url) throws Exception {
AbstractRipper ripper = AbstractRipper.getRipper(url);
ripper.setup();
ripper.rip();
}
- public static void handleArguments(String[] args) {
+ private static void handleArguments(String[] args) {
CommandLine cl = getArgs(args);
if (cl.hasOption('h')) {
HelpFormatter hf = new HelpFormatter();
@@ -172,7 +172,7 @@ public class App {
}
// this function will attempt to rip the provided url
- public static void ripURL(String targetURL, boolean saveConfig) {
+ private static void ripURL(String targetURL, boolean saveConfig) {
try {
URL url = new URL(targetURL);
rip(url);
@@ -193,7 +193,7 @@ public class App {
}
}
- public static Options getOptions() {
+ private static Options getOptions() {
Options opts = new Options();
opts.addOption("h", "help", false, "Print the help");
opts.addOption("u", "url", true, "URL of album to rip");
@@ -211,11 +211,10 @@ public class App {
return opts;
}
- public static CommandLine getArgs(String[] args) {
+ private static CommandLine getArgs(String[] args) {
BasicParser parser = new BasicParser();
try {
- CommandLine cl = parser.parse(getOptions(), args, false);
- return cl;
+ return parser.parse(getOptions(), args, false);
} catch (ParseException e) {
logger.error("[!] Error while parsing command-line arguments: " + Arrays.toString(args), e);
System.exit(-1);
@@ -244,12 +243,7 @@ public class App {
if (HISTORY.toList().size() == 0) {
// Loaded from config, still no entries.
// Guess rip history based on rip folder
- String[] dirs = Utils.getWorkingDirectory().list(new FilenameFilter() {
- @Override
- public boolean accept(File dir, String file) {
- return new File(dir.getAbsolutePath() + File.separator + file).isDirectory();
- }
- });
+ String[] dirs = Utils.getWorkingDirectory().list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory());
for (String dir : dirs) {
String url = RipUtils.urlFromDirectoryName(dir);
if (url != null) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
index cdab1664..3b7496a0 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
@@ -17,27 +17,27 @@ import com.rarchives.ripme.utils.Utils;
*/
public abstract class AbstractHTMLRipper extends AlbumRipper {
- public AbstractHTMLRipper(URL url) throws IOException {
+ protected AbstractHTMLRipper(URL url) throws IOException {
super(url);
}
- public abstract String getDomain();
+ protected abstract String getDomain();
public abstract String getHost();
- public abstract Document getFirstPage() throws IOException;
+ protected abstract Document getFirstPage() throws IOException;
public Document getNextPage(Document doc) throws IOException {
return null;
}
- public abstract List<String> getURLsFromPage(Document page);
- public List<String> getDescriptionsFromPage(Document doc) throws IOException {
+ protected abstract List<String> getURLsFromPage(Document page);
+ protected List<String> getDescriptionsFromPage(Document doc) throws IOException {
throw new IOException("getDescriptionsFromPage not implemented"); // Do I do this or make an abstract function?
}
- public abstract void downloadURL(URL url, int index);
- public DownloadThreadPool getThreadPool() {
+ protected abstract void downloadURL(URL url, int index);
+ protected DownloadThreadPool getThreadPool() {
return null;
}
- public boolean keepSortOrder() {
+ protected boolean keepSortOrder() {
return true;
}
@@ -50,13 +50,13 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}
- public boolean hasDescriptionSupport() {
+ protected boolean hasDescriptionSupport() {
return false;
}
- public String[] getDescription(String url,Document page) throws IOException {
+ protected String[] getDescription(String url, Document page) throws IOException {
throw new IOException("getDescription not implemented"); // Do I do this or make an abstract function?
}
- public int descSleepTime() {
+ protected int descSleepTime() {
return 100;
}
@Override
@@ -140,7 +140,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
}
waitForThreads();
}
- public String fileNameFromURL(URL url) {
+ private String fileNameFromURL(URL url) {
String saveAs = url.toExternalForm();
if (saveAs.substring(saveAs.length() - 1) == "/") { saveAs = saveAs.substring(0,saveAs.length() - 1) ;}
saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
@@ -154,7 +154,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
String saveAs = fileNameFromURL(url);
return saveText(url,subdirectory,text,index,saveAs);
}
- public boolean saveText(URL url, String subdirectory, String text, int index, String fileName) {
+ private boolean saveText(URL url, String subdirectory, String text, int index, String fileName) {
// Not the best for some cases, like FurAffinity. Overridden there.
try {
stopCheck();
@@ -189,7 +189,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
}
return true;
}
- public String getPrefix(int index) {
+ protected String getPrefix(int index) {
String prefix = "";
if (keepSortOrder() && Utils.getConfigBoolean("download.save_order", true)) {
prefix = String.format("%03d_", index);
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java
index 4c21821e..a315bcdc 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java
@@ -15,24 +15,24 @@ import com.rarchives.ripme.utils.Utils;
*/
public abstract class AbstractJSONRipper extends AlbumRipper {
- public AbstractJSONRipper(URL url) throws IOException {
+ protected AbstractJSONRipper(URL url) throws IOException {
super(url);
}
- public abstract String getDomain();
+ protected abstract String getDomain();
public abstract String getHost();
- public abstract JSONObject getFirstPage() throws IOException;
- public JSONObject getNextPage(JSONObject doc) throws IOException {
+ protected abstract JSONObject getFirstPage() throws IOException;
+ protected JSONObject getNextPage(JSONObject doc) throws IOException {
throw new IOException("getNextPage not implemented");
}
- public abstract List<String> getURLsFromJSON(JSONObject json);
- public abstract void downloadURL(URL url, int index);
- public DownloadThreadPool getThreadPool() {
+ protected abstract List<String> getURLsFromJSON(JSONObject json);
+ protected abstract void downloadURL(URL url, int index);
+ private DownloadThreadPool getThreadPool() {
return null;
}
- public boolean keepSortOrder() {
+ protected boolean keepSortOrder() {
return true;
}
@@ -96,7 +96,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
waitForThreads();
}
- public String getPrefix(int index) {
+ protected String getPrefix(int index) {
String prefix = "";
if (keepSortOrder() && Utils.getConfigBoolean("download.save_order", true)) {
prefix = String.format("%03d_", index);
diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
index 8a343cd9..46f1f414 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
@@ -32,10 +32,10 @@ public abstract class AbstractRipper
protected URL url;
protected File workingDir;
- protected DownloadThreadPool threadPool;
- protected RipStatusHandler observer = null;
+ DownloadThreadPool threadPool;
+ RipStatusHandler observer = null;
- protected boolean completed = true;
+ private boolean completed = true;
public abstract void rip() throws IOException;
public abstract String getHost();
@@ -110,9 +110,9 @@ public abstract class AbstractRipper
* The cookies to send to the server while downloading this file.
* @return
*/
- public abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies);
+ protected abstract boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies);
- public boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies) {
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String,String> cookies) {
try {
stopCheck();
} catch (IOException e) {
@@ -159,7 +159,7 @@ public abstract class AbstractRipper
* Sub-directory of the working directory to save the images to.
* @return True on success, false on failure.
*/
- public boolean addURLToDownload(URL url, String prefix, String subdirectory) {
+ protected boolean addURLToDownload(URL url, String prefix, String subdirectory) {
return addURLToDownload(url, prefix, subdirectory, null, null);
}
@@ -172,7 +172,7 @@ public abstract class AbstractRipper
* Text to append to saved filename.
* @return True on success, false on failure.
*/
- public boolean addURLToDownload(URL url, String prefix) {
+ protected boolean addURLToDownload(URL url, String prefix) {
// Use empty subdirectory
return addURLToDownload(url, prefix, "");
}
@@ -223,14 +223,14 @@ public abstract class AbstractRipper
/**
* @return Number of files downloaded.
*/
- public int getCount() {
+ int getCount() {
return 1;
}
/**
* Notifies observers and updates state if all files have been ripped.
*/
- protected void checkIfComplete() {
+ void checkIfComplete() {
if (observer == null) {
logger.debug("observer is null");
return;
@@ -320,10 +320,10 @@ public abstract class AbstractRipper
* @throws Exception
*/
public static List<Constructor<?>> getRipperConstructors(String pkg) throws Exception {
- List<Constructor<?>> constructors = new ArrayList<Constructor<?>>();
+ List<Constructor<?>> constructors = new ArrayList<>();
for (Class<?> clazz : Utils.getClassesForPackage(pkg)) {
if (AbstractRipper.class.isAssignableFrom(clazz)) {
- constructors.add( (Constructor<?>) clazz.getConstructor(URL.class) );
+ constructors.add(clazz.getConstructor(URL.class));
}
}
return constructors;
@@ -355,10 +355,6 @@ public abstract class AbstractRipper
logger.error("Got exception while running ripper:", e);
waitForThreads();
sendUpdate(STATUS.RIP_ERRORED, "HTTP status code " + e.getStatusCode() + " for URL " + e.getUrl());
- } catch (IOException e) {
- logger.error("Got exception while running ripper:", e);
- waitForThreads();
- sendUpdate(STATUS.RIP_ERRORED, e.getMessage());
} catch (Exception e) {
logger.error("Got exception while running ripper:", e);
waitForThreads();
@@ -368,7 +364,7 @@ public abstract class AbstractRipper
}
}
- public void cleanup() {
+ private void cleanup() {
if (this.workingDir.list().length == 0) {
// No files, delete the dir
logger.info("Deleting empty directory " + this.workingDir);
@@ -379,7 +375,7 @@ public abstract class AbstractRipper
}
}
- public boolean sleep(int milliseconds) {
+ protected boolean sleep(int milliseconds) {
try {
logger.debug("Sleeping " + milliseconds + "ms");
Thread.sleep(milliseconds);
@@ -402,7 +398,7 @@ public abstract class AbstractRipper
logger.debug("THIS IS A TEST RIP");
thisIsATest = true;
}
- public boolean isThisATest() {
+ protected boolean isThisATest() {
return thisIsATest;
}
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
index 50df96c4..a5b2c91e 100644
--- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
@@ -15,11 +15,11 @@ import com.rarchives.ripme.utils.Utils;
public abstract class AlbumRipper extends AbstractRipper {
- protected Map<URL, File> itemsPending = Collections.synchronizedMap(new HashMap<URL, File>());
- protected Map<URL, File> itemsCompleted = Collections.synchronizedMap(new HashMap<URL, File>());
- protected Map<URL, String> itemsErrored = Collections.synchronizedMap(new HashMap<URL, String>());
+ private Map<URL, File> itemsPending = Collections.synchronizedMap(new HashMap<URL, File>());
+ private Map<URL, File> itemsCompleted = Collections.synchronizedMap(new HashMap<URL, File>());
+ private Map<URL, String> itemsErrored = Collections.synchronizedMap(new HashMap<URL, String>());
- public AlbumRipper(URL url) throws IOException {
+ protected AlbumRipper(URL url) throws IOException {
super(url);
}
@@ -29,7 +29,7 @@ public abstract class AlbumRipper extends AbstractRipper {
public abstract String getHost();
public abstract String getGID(URL url) throws MalformedURLException;
- public boolean allowDuplicates() {
+ protected boolean allowDuplicates() {
return false;
}
@@ -95,7 +95,7 @@ public abstract class AlbumRipper extends AbstractRipper {
* @return
* True on success
*/
- public boolean addURLToDownload(URL url) {
+ protected boolean addURLToDownload(URL url) {
// Use empty prefix and empty subdirectory
return addURLToDownload(url, "", "");
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
index 8a346fb4..c62d58a6 100644
--- a/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
+++ b/src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
@@ -24,12 +24,12 @@ import com.rarchives.ripme.utils.Utils;
* Thread for downloading files.
* Includes retry logic, observer notifications, and other goodies.
*/
-public class DownloadFileThread extends Thread {
+class DownloadFileThread extends Thread {
private static final Logger logger = Logger.getLogger(DownloadFileThread.class);
private String referrer = "";
- private Map<String,String> cookies = new HashMap<String,String>();
+ private Map<String,String> cookies = new HashMap<>();
private URL url;
private File saveAs;
diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java b/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java
index 85e375f1..ee97ce60 100644
--- a/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java
+++ b/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java
@@ -20,7 +20,7 @@ import com.rarchives.ripme.utils.Utils;
* Thread for downloading files.
* Includes retry logic, observer notifications, and other goodies.
*/
-public class DownloadVideoThread extends Thread {
+class DownloadVideoThread extends Thread {
private static final Logger logger = Logger.getLogger(DownloadVideoThread.class);
diff --git a/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java b/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java
index 9951f9bc..57ff8418 100644
--- a/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java
+++ b/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java
@@ -8,7 +8,7 @@ import java.net.URL;
* I have no idea why I made this interface. Everything is captured within the AbstractRipper.
* Oh well, here's to encapsulation and abstraction! (raises glass)
*/
-public interface RipperInterface {
+interface RipperInterface {
void rip() throws IOException;
boolean canRip(URL url);
URL sanitizeURL(URL url) throws MalformedURLException;
diff --git a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
index 6cdc06fd..62be7858 100644
--- a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
@@ -16,7 +16,7 @@ public abstract class VideoRipper extends AbstractRipper {
private int bytesTotal = 1,
bytesCompleted = 1;
- public VideoRipper(URL url) throws IOException {
+ protected VideoRipper(URL url) throws IOException {
super(url);
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java
index 11349716..43e881b8 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java
@@ -23,7 +23,7 @@ import java.util.HashMap;
public class AerisdiesRipper extends AbstractHTMLRipper {
private Document albumDoc = null;
- private Map<String,String> cookies = new HashMap<String,String>();
+ private Map<String,String> cookies = new HashMap<>();
public AerisdiesRipper(URL url) throws IOException {
@@ -74,7 +74,7 @@ public class AerisdiesRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
Elements albumElements = page.select("div.imgbox > a > img");
for (Element imageBox : albumElements) {
String imageUrl = imageBox.attr("src");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java
index 682519cc..1138b364 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java
@@ -66,7 +66,7 @@ public class BcfakesRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("div.ngg-gallery-thumbnail > a > img")) {
String imageURL = thumb.attr("src");
imageURL = imageURL.replace("thumbs/thumbs_", "");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ButttoucherRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ButttoucherRipper.java
index 23e73342..77a25902 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ButttoucherRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ButttoucherRipper.java
@@ -33,7 +33,7 @@ public class ButttoucherRipper extends AbstractHTMLRipper {
public String getGID(URL url) throws MalformedURLException {
Pattern p; Matcher m;
- p = Pattern.compile("^.*butttoucher.com/users/([a-zA-Z0-9_\\-]{1,}).*$");
+ p = Pattern.compile("^.*butttoucher.com/users/([a-zA-Z0-9_\\-]+).*$");
m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
@@ -51,7 +51,7 @@ public class ButttoucherRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> thumbs = new ArrayList<String>();
+ List<String> thumbs = new ArrayList<>();
for (Element thumb : page.select(".thumb img")) {
if (!thumb.hasAttr("src")) {
continue;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
index f50956c6..e233f66f 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
@@ -71,7 +71,7 @@ public class CfakeRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> result = new ArrayList<String>();
+ List<String> result = new ArrayList<>();
for (Element el : doc.select("table.display > tbody > tr > td > table > tbody > tr > td > a")) {
if (el.attr("href").contains("upload")) {
return result;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
index e9993321..ba5cdad8 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
@@ -18,7 +18,7 @@ import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.RipUtils;
public class ChanRipper extends AbstractHTMLRipper {
- public static List<ChanSite> explicit_domains = Arrays.asList(
+ private static List<ChanSite> explicit_domains = Arrays.asList(
new ChanSite(Arrays.asList("boards.4chan.org"), Arrays.asList("4cdn.org", "is.4chan.org", "is2.4chan.org")),
new ChanSite(Arrays.asList("archive.moe"), Arrays.asList("data.archive.moe")),
new ChanSite(Arrays.asList("4archive.org"), Arrays.asList("imgur.com")),
@@ -26,15 +26,15 @@ public class ChanRipper extends AbstractHTMLRipper {
new ChanSite(Arrays.asList("fgts.jp"), Arrays.asList("dat.fgtsi.org"))
);
- public static List<String> url_piece_blacklist = Arrays.asList(
+ private static List<String> url_piece_blacklist = Arrays.asList(
"=http",
"http://imgops.com/",
"iqdb.org",
"saucenao.com"
);
- public ChanSite chanSite;
- public Boolean generalChanSite = true;
+ private ChanSite chanSite;
+ private Boolean generalChanSite = true;
public ChanRipper(URL url) throws IOException {
super(url);
@@ -143,7 +143,7 @@ public class ChanRipper extends AbstractHTMLRipper {
}
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
Pattern p; Matcher m;
for (Element link : page.select("a")) {
if (!link.hasAttr("href")) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CheebyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CheebyRipper.java
index c90cfae8..e7f8ef3a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/CheebyRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CheebyRipper.java
@@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Http;
public class CheebyRipper extends AbstractHTMLRipper {
private int offset = 0;
- private Map albumSets = new HashMap();
+ private Map albumSets = new HashMap<>();
public CheebyRipper(URL url) throws IOException {
super(url);
@@ -81,8 +81,8 @@ public class CheebyRipper extends AbstractHTMLRipper {
return null;
}
- public List<String> getImagesFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ private List<String> getImagesFromPage(Document page) {
+ List<String> imageURLs = new ArrayList<>();
for (Element image : page.select("div.i a img")) {
// Get image URL
String imageURL = image.attr("src");
@@ -171,7 +171,7 @@ public class CheebyRipper extends AbstractHTMLRipper {
private class Image {
String url, prefix;
int index;
- public Image(String url, String prefix, int index) {
+ Image(String url, String prefix, int index) {
this.url = url;
this.prefix = prefix;
this.index = index;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java
index e235d90d..213a340a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java
@@ -22,23 +22,21 @@ public class CheveretoRipper extends AbstractHTMLRipper {
super(url);
}
- public static List<String> explicit_domains_1 = Arrays.asList("hushpix.com", "tag-fox.com");
+ private static List<String> explicit_domains_1 = Arrays.asList("hushpix.com", "tag-fox.com");
@Override
public String getHost() {
- String host = url.toExternalForm().split("/")[2];
- return host;
+ return url.toExternalForm().split("/")[2];
}
@Override
public String getDomain() {
- String host = url.toExternalForm().split("/")[2];
- return host;
+ return url.toExternalForm().split("/")[2];
}
@Override
public boolean canRip(URL url) {
String url_name = url.toExternalForm();
- if (explicit_domains_1.contains(url_name.split("/")[2]) == true) {
+ if (explicit_domains_1.contains(url_name.split("/")[2])) {
Pattern pa = Pattern.compile("(?:https?://)?(?:www\\.)?[a-z1-9-]*\\.[a-z1-9]*/album/([a-zA-Z1-9]*)/?$");
Matcher ma = pa.matcher(url.toExternalForm());
if (ma.matches()) {
@@ -103,7 +101,7 @@ public class CheveretoRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> result = new ArrayList<String>();
+ List<String> result = new ArrayList<>();
for (Element el : doc.select("a.image-container > img")) {
String imageSource = el.attr("src");
// We remove the .md from images so we download the full size image
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DatwinRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DatwinRipper.java
index 2f276bd8..38708230 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/DatwinRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DatwinRipper.java
@@ -49,7 +49,7 @@ public class DatwinRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("img.attachment-thumbnail")) {
String image = thumb.attr("src");
image = image.replaceAll("-\\d{1,3}x\\d{1,3}", "");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
index e61cb007..15b0fd5d 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
@@ -14,6 +14,7 @@ import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import com.rarchives.ripme.ripper.AbstractRipper;
import org.jsoup.Connection.Method;
import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
@@ -32,8 +33,8 @@ public class DeviantartRipper extends AbstractHTMLRipper {
private static final int PAGE_SLEEP_TIME = 3000,
IMAGE_SLEEP_TIME = 2000;
- private Map<String,String> cookies = new HashMap<String,String>();
- private Set<String> triedURLs = new HashSet<String>();
+ private Map<String,String> cookies = new HashMap<>();
+ private Set<String> triedURLs = new HashSet<>();
public DeviantartRipper(URL url) throws IOException {
super(url);
@@ -63,7 +64,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
u += "gallery/?";
}
- Pattern p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{1,})\\.deviantart\\.com/favou?rites/([0-9]+)/*?$");
+ Pattern p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/favou?rites/([0-9]+)/*?$");
Matcher m = p.matcher(url.toExternalForm());
if (!m.matches()) {
String subdir = "/";
@@ -88,18 +89,18 @@ public class DeviantartRipper extends AbstractHTMLRipper {
return m.group(1);
}
}
- p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{1,})\\.deviantart\\.com/gallery/([0-9]{1,}).*$");
+ p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/gallery/([0-9]+).*$");
m = p.matcher(url.toExternalForm());
if (m.matches()) {
// Subgallery
return m.group(1) + "_" + m.group(2);
}
- p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{1,})\\.deviantart\\.com/favou?rites/([0-9]+)/.*?$");
+ p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/favou?rites/([0-9]+)/.*?$");
m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1) + "_faves_" + m.group(2);
}
- p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{1,})\\.deviantart\\.com/favou?rites/?$");
+ p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/favou?rites/?$");
m = p.matcher(url.toExternalForm());
if (m.matches()) {
// Subgallery
@@ -121,14 +122,14 @@ public class DeviantartRipper extends AbstractHTMLRipper {
.cookies(cookies)
.get();
}
- public String jsonToImage(Document page,String id) {
+ private String jsonToImage(Document page, String id) {
Elements js = page.select("script[type=\"text/javascript\"]");
for (Element tag : js) {
if (tag.html().contains("window.__pageload")) {
try {
String script = tag.html();
script = script.substring(script.indexOf("window.__pageload"));
- if (script.indexOf(id) < 0) {
+ if (!script.contains(id)) {
continue;
}
script = script.substring(script.indexOf(id));
@@ -144,7 +145,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
}
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
// Iterate over all thumbnails
for (Element thumb : page.select("div.zones-container span.thumb")) {
@@ -194,7 +195,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
}
@Override
public List<String> getDescriptionsFromPage(Document page) {
- List<String> textURLs = new ArrayList<String>();
+ List<String> textURLs = new ArrayList<>();
// Iterate over all thumbnails
for (Element thumb : page.select("div.zones-container span.thumb")) {
logger.info(thumb.attr("href"));
@@ -257,9 +258,9 @@ public class DeviantartRipper extends AbstractHTMLRipper {
* @return Full-size image URL
* @throws Exception If it can't find the full-size URL
*/
- public static String thumbToFull(String thumb, boolean throwException) throws Exception {
+ private static String thumbToFull(String thumb, boolean throwException) throws Exception {
thumb = thumb.replace("http://th", "http://fc");
- List<String> fields = new ArrayList<String>(Arrays.asList(thumb.split("/")));
+ List<String> fields = new ArrayList<>(Arrays.asList(thumb.split("/")));
fields.remove(4);
if (!fields.get(4).equals("f") && throwException) {
// Not a full-size image
@@ -339,7 +340,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
* @param page Page the thumbnail is retrieved from
* @return Highest-resolution version of the image based on thumbnail URL and the page.
*/
- public String smallToFull(String thumb, String page) {
+ private String smallToFull(String thumb, String page) {
try {
// Fetch the image page
Response resp = Http.url(page)
@@ -373,7 +374,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
}
cookieString = cookieString.substring(0,cookieString.length() - 1);
con.setRequestProperty("Cookie",cookieString);
- con.setRequestProperty("User-Agent",this.USER_AGENT);
+ con.setRequestProperty("User-Agent", USER_AGENT);
con.setInstanceFollowRedirects(true);
con.connect();
int code = con.getResponseCode();
@@ -406,7 +407,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
*/
private Map loginToDeviantart() throws IOException {
// Populate postData fields
- Map<String,String> postData = new HashMap<String,String>();
+ Map<String,String> postData = new HashMap<>();
String username = Utils.getConfigString("deviantart.username", new String(Base64.decode("Z3JhYnB5")));
String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
if (username == null || password == null) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DrawcrowdRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DrawcrowdRipper.java
index 83fba022..14eacbb5 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/DrawcrowdRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DrawcrowdRipper.java
@@ -72,7 +72,7 @@ public class DrawcrowdRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : page.select("div.item.asset img")) {
String image = thumb.attr("src");
image = image
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java
index 3cb606c7..f61e6f98 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java
@@ -18,11 +18,11 @@ import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class E621Ripper extends AbstractHTMLRipper {
- public static final int POOL_IMAGES_PER_PAGE = 24;
+ private static final int POOL_IMAGES_PER_PAGE = 24;
private DownloadThreadPool e621ThreadPool = new DownloadThreadPool("e621");
public E621Ripper(URL url) throws IOException {
super(url);
}
@@ -53,7 +53,7 @@ public class E621Ripper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
Elements elements = page.select("#post-list .thumb a,#pool-show .thumb a");
- List<String> res = new ArrayList<String>(elements.size());
+ List<String> res = new ArrayList<>(elements.size());
if (page.getElementById("pool-show") != null) {
int index = 0;
@@ -92,23 +92,21 @@ public class E621Ripper extends AbstractHTMLRipper {
@Override
public void downloadURL(final URL url, int index) {
- e621ThreadPool.addThread(new Thread(new Runnable() {
- public void run() {
- try {
- Document page = Http.url(url).get();
- Element e = page.getElementById("image");
+ e621ThreadPool.addThread(new Thread(() -> {
+ try {
+ Document page = Http.url(url).get();
+ Element e = page.getElementById("image");
- if (e != null) {
- addURLToDownload(new URL(e.absUrl("src")), Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
- } else if ((e = page.select(".content object>param[name=\"movie\"]").first()) != null) {
- addURLToDownload(new URL(e.absUrl("value")), Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
- } else {
- Logger.getLogger(E621Ripper.class.getName()).log(Level.WARNING, "Unsupported media type - please report to program author: " + url.toString());
- }
-
- } catch (IOException ex) {
- Logger.getLogger(E621Ripper.class.getName()).log(Level.SEVERE, null, ex);
+ if (e != null) {
+ addURLToDownload(new URL(e.absUrl("src")), Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
+ } else if ((e = page.select(".content object>param[name=\"movie\"]").first()) != null) {
+ addURLToDownload(new URL(e.absUrl("value")), Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
+ } else {
+ Logger.getLogger(E621Ripper.class.getName()).log(Level.WARNING, "Unsupported media type - please report to program author: " + url.toString());
}
+
+ } catch (IOException ex) {
+ Logger.getLogger(E621Ripper.class.getName()).log(Level.SEVERE, null, ex);
}
}));
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java
index a622d832..8bd87cb2 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java
@@ -39,7 +39,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
// Current HTML document
private Document albumDoc = null;
- private static final Map<String,String> cookies = new HashMap<String,String>();
+ private static final Map<String,String> cookies = new HashMap<>();
static {
cookies.put("nw", "1");
cookies.put("tip", "1");
@@ -162,7 +162,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
Elements thumbs = page.select("#gdt > .gdtm a");
// Iterate over images on page
for (Element thumb : thumbs) {
@@ -193,7 +193,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
private int index;
private File workingDir;
- public EHentaiImageThread(URL url, int index, File workingDir) {
+ EHentaiImageThread(URL url, int index, File workingDir) {
super();
this.url = url;
this.index = index;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
index 2be5b41e..91771713 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
@@ -23,10 +23,10 @@ import com.rarchives.ripme.utils.Http;
public class EightmusesRipper extends AbstractHTMLRipper {
private Document albumDoc = null;
- private Map<String,String> cookies = new HashMap<String,String>();
+ private Map<String,String> cookies = new HashMap<>();
// TODO put up a wiki page on using maps to store titles
// the map for storing the title of each album when downloading sub albums
- private Map<String,String> urlTitles = new HashMap<String,String>();
+ private Map<String,String> urlTitles = new HashMap<>();
private Boolean rippingSubalbums = false;
@@ -81,7 +81,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
// get the first image link on the page and check if the last char in it is a number
// if it is a number then we're ripping a comic if not it's a subalbum
String firstImageLink = page.select("div.gallery > a.t-hover").first().attr("href");
@@ -136,7 +136,6 @@ public class EightmusesRipper extends AbstractHTMLRipper {
imageURLs.addAll(subalbumImages);
} catch (IOException e) {
logger.warn("Error while loading subalbum " + subUrl, e);
- continue;
}
}
}
@@ -182,8 +181,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
Element fullSizeImage = doc.select(".photo").first(); // Select the "photo" element from the page (there should only be 1)
// subdir is the sub dir the cdn has the image stored in
String subdir = doc.select("input#imageDir").first().attr("value");
- String path = "https://cdn.ampproject.org/i/s/www.8muses.com/" + subdir + "small/" + fullSizeImage.children().select("#imageName").attr("value"); // Append the path to the fullsize image file to the standard prefix
- return path;
+ return "https://cdn.ampproject.org/i/s/www.8muses.com/" + subdir + "small/" + fullSizeImage.children().select("#imageName").attr("value");
}
@Override
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
index 85839e7a..6b7ba0a0 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java
@@ -71,19 +71,13 @@ public class EroShareRipper extends AbstractHTMLRipper {
Pattern p_eroshare_profile = Pattern.compile("^https?://eroshare.com/u/([a-zA-Z0-9\\-_]+)/?$");
Matcher m_eroshare_profile = p_eroshare_profile.matcher(url.toExternalForm());
- if (m_eroshare_profile.matches()) {
- return true;
- }
- return false;
+ return m_eroshare_profile.matches();
}
- public boolean is_profile(URL url) {
+ private boolean is_profile(URL url) {
Pattern pa = Pattern.compile("^https?://eroshae.com/u/([a-zA-Z0-9\\-_]+)/?$");
Matcher ma = pa.matcher(url.toExternalForm());
- if (ma.matches()) {
- return true;
- }
- return false;
+ return ma.matches();
}
@Override
@@ -103,7 +97,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
- if (is_profile(url) == false) {
+ if (!is_profile(url)) {
try {
// Attempt to use album title as GID
Element titleElement = getFirstPage().select("meta[property=og:title]").first();
@@ -122,7 +116,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> URLs = new ArrayList<String>();
+ List<String> URLs = new ArrayList<>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs) {
@@ -172,9 +166,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
.ignoreContentType()
.response();
- Document doc = resp.parse();
-
- return doc;
+ return resp.parse();
}
@Override
@@ -214,7 +206,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
Document doc = resp.parse();
- List<String> URLs = new ArrayList<String>();
+ List<String> URLs = new ArrayList<>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java
index 1e41135f..1cb6774e 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java
@@ -68,7 +68,7 @@ public class EromeRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> URLs = new ArrayList<String>();
+ List<String> URLs = new ArrayList<>();
//Pictures
Elements imgs = doc.select("div.img > img.img-front");
for (Element img : imgs) {
@@ -92,9 +92,7 @@ public class EromeRipper extends AbstractHTMLRipper {
.ignoreContentType()
.response();
- Document doc = resp.parse();
-
- return doc;
+ return resp.parse();
}
@Override
@@ -115,7 +113,7 @@ public class EromeRipper extends AbstractHTMLRipper {
Document doc = resp.parse();
- List<String> URLs = new ArrayList<String>();
+ List<String> URLs = new ArrayList<>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FapprovedRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FapprovedRipper.java
index a594a72b..e630b3a3 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FapprovedRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FapprovedRipper.java
@@ -80,7 +80,7 @@ public class FapprovedRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element image : page.select("div.actual-image img")) {
String imageURL = image.attr("src");
if (imageURL.startsWith("//")) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FineboxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FineboxRipper.java
index bec9032d..a89e2d1b 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FineboxRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FineboxRipper.java
@@ -80,7 +80,7 @@ public class FineboxRipper extends AlbumRipper {
@Override
public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("^https?://(www\\.)?(v|f)inebox\\.co/u/([a-zA-Z0-9]{1,}).*$");
+ Pattern p = Pattern.compile("^https?://(www\\.)?([vf])inebox\\.co/u/([a-zA-Z0-9]+).*$");
Matcher m = p.matcher(url.toExternalForm());
if (!m.matches()) {
throw new MalformedURLException("Expected format: http://"+DOMAIN+"/u/USERNAME");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
index 42649af9..7e532943 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
@@ -256,7 +256,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
@Override
public List<String> getURLsFromJSON(JSONObject json) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
JSONArray photos = json.getJSONArray("photos");
for (int i = 0; i < photos.length(); i++) {
if (super.isStopped()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java
index b893693e..df7aee5b 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java
@@ -28,9 +28,9 @@ import com.rarchives.ripme.utils.Utils;
public class FlickrRipper extends AbstractHTMLRipper {
private int page = 1;
- private Set<String> attempted = new HashSet<String>();
+ private Set<String> attempted = new HashSet<>();
private Document albumDoc = null;
- private DownloadThreadPool flickrThreadPool;
+ private final DownloadThreadPool flickrThreadPool;
@Override
public DownloadThreadPool getThreadPool() {
return flickrThreadPool;
@@ -162,7 +162,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : page.select("a[data-track=photo-click]")) {
/* TODO find a way to persist the image title
String imageTitle = null;
@@ -215,7 +215,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
.method(Method.GET)
.execute();
Document doc = resp.parse();
- Map<String,String> postData = new HashMap<String,String>();
+ Map<String,String> postData = new HashMap<>();
for (Element input : doc.select("input[type=hidden]")) {
postData.put(input.attr("name"), input.attr("value"));
}
@@ -239,7 +239,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
private URL url;
private int index;
- public FlickrImageThread(URL url, int index) {
+ FlickrImageThread(URL url, int index) {
super();
this.url = url;
this.index = index;
@@ -252,7 +252,6 @@ public class FlickrRipper extends AbstractHTMLRipper {
Elements fullsizeImages = doc.select("div#allsizes-photo img");
if (fullsizeImages.size() == 0) {
logger.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
- return;
}
else {
String prefix = "";
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
index 08d53eec..9c4b89aa 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
@@ -28,8 +28,8 @@ import com.rarchives.ripme.utils.Http;
public class FuraffinityRipper extends AbstractHTMLRipper {
- static Map<String,String> cookies=null;
- static final String urlBase = "https://www.furaffinity.net";
+ private static Map<String,String> cookies=null;
+ private static final String urlBase = "https://www.furaffinity.net";
// Thread pool for finding direct image links from "image" pages (html)
private DownloadThreadPool furaffinityThreadPool
@@ -75,7 +75,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
.response();
cookies = loginPage.cookies();
- Map<String,String> formData = new HashMap<String,String>();
+ Map<String,String> formData = new HashMap<>();
formData.put("action", "login");
formData.put("retard_protection", "1");
formData.put("name", user);
@@ -112,7 +112,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> urls = new ArrayList<String>();
+ List<String> urls = new ArrayList<>();
Elements urlElements = page.select("figure.t-image > b > u > a");
for (Element e : urlElements) {
urls.add(urlBase + e.select("a").first().attr("href"));
@@ -121,7 +121,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
}
@Override
public List<String> getDescriptionsFromPage(Document page) {
- List<String> urls = new ArrayList<String>();
+ List<String> urls = new ArrayList<>();
Elements urlElements = page.select("figure.t-image > b > u > a");
for (Element e : urlElements) {
urls.add(urlBase + e.select("a").first().attr("href"));
@@ -157,9 +157,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
ele.select("p").prepend("\\n\\n");
logger.debug("Returning description at " + page);
String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false));
- String title = documentz.select("meta[property=og:title]").attr("content");
- String tempText = title;
- return tempText + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name.
+ return documentz.select("meta[property=og:title]").attr("content") + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name.
} catch (IOException ioe) {
logger.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
return null;
@@ -232,7 +230,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
private class FuraffinityDocumentThread extends Thread {
private URL url;
- public FuraffinityDocumentThread(URL url) {
+ FuraffinityDocumentThread(URL url) {
super();
this.url = url;
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java
index 1f5fc35d..5ebb7297 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java
@@ -60,7 +60,7 @@ public class FuskatorRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
String html = doc.html();
// Get "baseUrl"
String baseUrl = Utils.between(html, "unescape('", "'").get(0);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GifyoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GifyoRipper.java
index 3e4d4853..e844e782 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/GifyoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GifyoRipper.java
@@ -22,7 +22,7 @@ import com.rarchives.ripme.utils.Http;
public class GifyoRipper extends AbstractHTMLRipper {
private int page = 0;
- private Map<String,String> cookies = new HashMap<String,String>();
+ private Map<String,String> cookies = new HashMap<>();
public GifyoRipper(URL url) throws IOException {
super(url);
@@ -70,7 +70,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
page++;
- Map<String,String> postData = new HashMap<String,String>();
+ Map<String,String> postData = new HashMap<>();
postData.put("cmd", "refreshData");
postData.put("view", "gif");
postData.put("layout", "grid");
@@ -92,7 +92,7 @@ public class GifyoRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element image : doc.select("img.profile_gif")) {
String imageUrl = image.attr("data-animated");
if (imageUrl.startsWith("//")) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java
index 1950cf8e..271d0313 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java
@@ -50,7 +50,7 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper {
Pattern p;
Matcher m;
- p = Pattern.compile("^www\\.girlsofdesire\\.org\\/galleries\\/([\\w\\d-]+)\\/$");
+ p = Pattern.compile("^www\\.girlsofdesire\\.org/galleries/([\\w\\d-]+)/$");
m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
@@ -72,7 +72,7 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("td.vtop > a > img")) {
String imgSrc = thumb.attr("src");
imgSrc = imgSrc.replaceAll("_thumb\\.", ".");
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiCafeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiCafeRipper.java
index a6d93f93..9afeed26 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiCafeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiCafeRipper.java
@@ -64,7 +64,7 @@ public class HentaiCafeRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> result = new ArrayList<String>();
+ List<String> result = new ArrayList<>();
result.add(doc.select("div[id=page] > div.inner > a > img.open").attr("src"));
return result;
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
index ca4e906e..561c4249 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
@@ -20,7 +20,7 @@ import com.rarchives.ripme.utils.Http;
public class HentaifoundryRipper extends AbstractHTMLRipper {
- private Map<String,String> cookies = new HashMap<String,String>();
+ private Map<String,String> cookies = new HashMap<>();
public HentaifoundryRipper(URL url) throws IOException {
super(url);
}
@@ -84,7 +84,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
Pattern imgRegex = Pattern.compile(".*/user/([a-zA-Z0-9\\-_]+)/(\\d+)/.*");
for (Element thumb : doc.select("div.thumb_square > a.thumbLink")) {
if (isStopped()) {
@@ -115,7 +115,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
imagePage = null;
}
// This is here for when the image is resized to a thumbnail because ripme doesn't report a screensize
- if (imagePage.select("div.boxbody > img.center").attr("src").contains("thumbs.") == true) {
+ if (imagePage.select("div.boxbody > img.center").attr("src").contains("thumbs.")) {
imageURLs.add("http:" + imagePage.select("div.boxbody > img.center").attr("onclick").replace("this.src=", "").replace("'", "").replace("; $(#resize_message).hide();", ""));
}
else {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java
index 1175e8a0..475817c9 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java
@@ -31,7 +31,7 @@ public class ImagearnRipper extends AbstractHTMLRipper {
@Override
public String getGID(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("^.*imagearn.com/{1,}gallery.php\\?id=([0-9]{1,}).*$");
+ Pattern p = Pattern.compile("^.*imagearn.com/+gallery.php\\?id=([0-9]+).*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
@@ -43,7 +43,7 @@ public class ImagearnRipper extends AbstractHTMLRipper {
}
public URL sanitizeURL(URL url) throws MalformedURLException {
- Pattern p = Pattern.compile("^.*imagearn.com/{1,}image.php\\?id=[0-9]{1,}.*$");
+ Pattern p = Pattern.compile("^.*imagearn.com/+image.php\\?id=[0-9]+.*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
// URL points to imagearn *image*, not gallery
@@ -77,7 +77,7 @@ public class ImagearnRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("div#gallery > div > a")) {
String imageURL = thumb.attr("href");
try {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
index f4608d73..323ad1de 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
@@ -81,7 +81,7 @@ public class ImagebamRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("div > a[target=_blank]:not(.footera)")) {
imageURLs.add(thumb.attr("href"));
}
@@ -124,7 +124,7 @@ public class ImagebamRipper extends AbstractHTMLRipper {
private URL url;
private int index;
- public ImagebamImageThread(URL url, int index) {
+ ImagebamImageThread(URL url, int index) {
super();
this.url = url;
this.index = index;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java
index 8e0aba13..83d4f098 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java
@@ -120,7 +120,7 @@ public class ImagefapRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("#gallery img")) {
if (!thumb.hasAttr("src") || !thumb.hasAttr("width")) {
continue;
@@ -129,7 +129,7 @@ public class ImagefapRipper extends AbstractHTMLRipper {
image = image.replaceAll(
"http://x.*.fap.to/images/thumb/",
"http://fap.to/images/full/");
- image = image.replaceAll("w[0-9]{1,}-h[0-9]{1,}/", "");
+ image = image.replaceAll("w[0-9]+-h[0-9]+/", "");
imageURLs.add(image);
if (isThisATest()) {
break;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagestashRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagestashRipper.java
index ad94b85b..c8cc3d74 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagestashRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagestashRipper.java
@@ -67,7 +67,7 @@ public class ImagestashRipper extends AbstractJSONRipper {
@Override
public List<String> getURLsFromJSON(JSONObject json) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
JSONArray images = json.getJSONArray("images");
for (int i = 0; i < images.length(); i++) {
JSONObject image = images.getJSONObject(i);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java
index c818f3ec..d62722e7 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java
@@ -62,7 +62,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
}
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("a[target=_blank]")) {
imageURLs.add(thumb.attr("href"));
}
@@ -83,7 +83,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
private URL url;
private int index;
- public ImagevenueImageThread(URL url, int index) {
+ ImagevenueImageThread(URL url, int index) {
super();
this.url = url;
this.index = index;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java
index 69392c95..846b4c80 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java
@@ -46,7 +46,7 @@ public class ImgboxRipper extends AbstractHTMLRipper {
}
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("div.boxed-content > a > img")) {
String image = thumb.attr("src")
.replaceAll("[-a-zA-Z0-9.]+s.imgbox.com",
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
index 0b7fd941..34cbd1ce 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
@@ -31,14 +31,15 @@ public class ImgurRipper extends AlbumRipper {
private Document albumDoc;
- static enum ALBUM_TYPE {
+ enum ALBUM_TYPE {
ALBUM,
USER,
USER_ALBUM,
USER_IMAGES,
SERIES_OF_IMAGES,
SUBREDDIT
- };
+ }
+
private ALBUM_TYPE albumType;
public ImgurRipper(URL url) throws IOException {
@@ -223,7 +224,7 @@ public class ImgurRipper extends AlbumRipper {
String[] imageIds = m.group(1).split(",");
for (String imageId : imageIds) {
// TODO: Fetch image with ID imageId
- logger.debug("Fetching image info for ID " + imageId);;
+ logger.debug("Fetching image info for ID " + imageId);
try {
JSONObject json = Http.url("https://api.imgur.com/2/image/" + imageId + ".json").getJSON();
if (!json.has("image")) {
@@ -350,7 +351,6 @@ public class ImgurRipper extends AlbumRipper {
Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000);
} catch (Exception e) {
logger.error("Error while ripping album: " + e.getMessage(), e);
- continue;
}
}
}
@@ -515,12 +515,12 @@ public class ImgurRipper extends AlbumRipper {
}
public static class ImgurImage {
- public String title = "",
- description = "",
- extension = "";
+ String title = "";
+ String description = "";
+ String extension = "";
public URL url = null;
- public ImgurImage(URL url) {
+ ImgurImage(URL url) {
this.url = url;
String tempUrl = url.toExternalForm();
this.extension = tempUrl.substring(tempUrl.lastIndexOf('.'));
@@ -528,7 +528,7 @@ public class ImgurRipper extends AlbumRipper {
this.extension = this.extension.substring(0, this.extension.indexOf("?"));
}
}
- public ImgurImage(URL url, String title) {
+ ImgurImage(URL url, String title) {
this(url);
this.title = title;
}
@@ -536,7 +536,7 @@ public class ImgurRipper extends AlbumRipper {
this(url, title);
this.description = description;
}
- public String getSaveAs() {
+ String getSaveAs() {
String saveAs = this.title;
String u = url.toExternalForm();
if (u.contains("?")) {
@@ -554,17 +554,17 @@ public class ImgurRipper extends AlbumRipper {
}
public static class ImgurAlbum {
- public String title = null;
+ String title = null;
public URL url = null;
- public List<ImgurImage> images = new ArrayList<ImgurImage>();
- public ImgurAlbum(URL url) {
+ public List<ImgurImage> images = new ArrayList<>();
+ ImgurAlbum(URL url) {
this.url = url;
}
public ImgurAlbum(URL url, String title) {
this(url);
this.title = title;
}
- public void addImage(ImgurImage image) {
+ void addImage(ImgurImage image) {
images.add(image);
}
}
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
index b4a3304f..92cb97a4 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
@@ -75,8 +75,7 @@ public class InstagramRipper extends AbstractJSONRipper {
String baseURL = "http://instagram.com/" + userID + "/media";
try {
- JSONObject result = Http.url(baseURL).getJSON();
- return result;
+ return Http.url(baseURL).getJSON();
} catch (JSONException e) {
throw new IOException("Could not get instagram user via: " + baseURL);
}
@@ -101,9 +100,7 @@ public class InstagramRipper extends AbstractJSONRipper {
logger.info("Loading " + baseURL);
sleep(1000);
- JSONObject nextJSON = Http.url(baseURL).getJSON();
-
- return nextJSON;
+ return Http.url(baseURL).getJSON();
} else {
throw new IOException("No more images found");
}
@@ -129,7 +126,7 @@ public class InstagramRipper extends AbstractJSONRipper {
// Instagram returns cropped images to unauthenticated applications to maintain legacy support.
// To retrieve the uncropped image, remove this segment from the URL.
// Segment format: cX.Y.W.H - eg: c0.134.1080.1080
- imageURL = imageURL.replaceAll("\\/c\\d{1,4}\\.\\d{1,4}\\.\\d{1,4}\\.\\d{1,4}", "");
+ imageURL = imageURL.replaceAll("/c\\d{1,4}\\.\\d{1,4}\\.\\d{1,4}\\.\\d{1,4}", "");
imageURL = imageURL.replaceAll("\\?ig_cache_key.+$", "");
return imageURL;
@@ -154,7 +151,7 @@ public class InstagramRipper extends AbstractJSONRipper {
@Override
public List<String> getURLsFromJSON(JSONObject json) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
JSONArray datas = json.getJSONArray("items");
for (int i = 0; i < datas.length(); i++) {
JSONObject data = (JSONObject) datas.get(i);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java
index 13ff0d30..a5e3eb37 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java
@@ -50,7 +50,7 @@ public class JagodibujaRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
- List<String> result = new ArrayList<String>();
+ List<String> result = new ArrayList<>();
for (Element comicPageUrl : doc.select("div.gallery-icon > a")) {
try {
sleep(500);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
index 13eb13f1..fd20de5a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
@@ -51,7 +51,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document page) {
- List<String> urls = new ArrayList<String>();
+ List<String> urls = new ArrayList<>();
Elements urlElements = page.select("img#single_picture");
for (Element e : urlElements) {
urls.add(e.attr("src"));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MediacrushRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MediacrushRipper.java
index b3fdbdaf..687ca509 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/MediacrushRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MediacrushRipper.java
@@ -25,7 +25,7 @@ import com.rarchives.ripme.utils.Http;
public class MediacrushRipper extends AbstractJSONRipper {
/** Ordered list of preferred formats, sorted by preference (low-to-high) */
- private static final Map<String, Integer> PREFERRED_FORMATS = new HashMap<String, Integer>();
+ private static final Map<String, Integer> PREFERRED_FORMATS = new HashMap<>();
static {
PREFERRED_FORMATS.put("mp4", 0);
PREFERRED_FORMATS.put("wemb",1);
@@ -36,7 +36,7 @@ public class MediacrushRipper extends AbstractJSONRipper {
PREFERRED_FORMATS.put("png", 6);
PREFERRED_FORMATS.put("jpg", 7);
PREFERRED_FORMATS.put("jpeg",8);
- };
+ }
public MediacrushRipper(URL url) throws IOException {
super(url);
@@ -113,7 +113,7 @@ public class MediacrushRipper extends AbstractJSONRipper {
@Override
public List<String> getURLsFromJSON(JSONObject json) {
- List<String> imageURLs = new ArrayList<String>();
+ List<String> imageURLs = new ArrayList<>();
// Iterate over all files
JSONArray files = json.getJSONArray("files");
for (int i = 0; i < files.length(); i++) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ModelmayhemRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelmayhemRipper.java
index 9dc5c563..11948d98 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ModelmayhemRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelmayhemRipper.java
@@ -41,7 +41,7 @@ public class ModelmayhemRipper extends AlbumRipper {
@Override
public void rip() throws IOException {
Map<String,String> cookies = null,
- postData = new HashMap<String,String>();
+ postData = new HashMap<>();
String gid = getGID(this.url),
ref = "http://www.modelmayhem.com/" + gid;
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java
index be2bc740..d8fb3655 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java
@@ -126,7 +126,7 @@ public class MotherlessRipper extends AlbumRipper {
private URL url;
private int index;
- public MotherlessImageThread(URL url, int index) {
+ MotherlessImageThread(URL url, int index) {
super();
this.url = url;
this.index = index;
@@ -142,7 +142,7 @@ public class MotherlessRipper extends AlbumRipper {
Document doc = Http.url(u)
.referrer(u)
.get();
- Pattern p = Pattern.compile("^.*__fileurl = '([^']{1,})';.*$", Pattern.DOTALL);
+ Pattern p = Pattern.compile("^.*__fileurl = '([^']+)';.*$", Pattern.DOTALL);
Matcher m = p.matcher(doc.outerHtml());
if (m.matches()) {
String file = m.group(1);
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
index b8ab2c2e..023d80fe 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
@@ -15,7 +15,7 @@ import org.jsoup.select.Elements;
import java.util.Arrays;
public class MyhentaicomicsRipper extends AbstractHTMLRipper {
- public static boolean isTag;
+ private static boolean isTag;
public MyhentaicomicsRipper(URL url) throws IOException {
super(url);
@@ -47,7 +47,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
return ma.group(1);
}
- Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+\\?=:]*)?$");
+ Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");
Matcher mat = pat.matcher(url.toExternalForm());
if (mat.matches()) {
isTag = true;
@@ -84,8 +84,8 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
}
// This replaces getNextPage when downloading from searchs and tags
- public List<String> getNextAlbumPage(String pageUrl) {
- List<String> albumPagesList = new ArrayList<String>();
+ private List<String> getNextAlbumPage(String pageUrl) {
+ List<String> albumPagesList = new ArrayList<>();
int pageNumber = 1;
albumPagesList.add("http://myhentaicomics.com/index.php/" + pageUrl.split("\\?")[0] + "?page=" + Integer.toString(pageNumber));
while (true) {
@@ -115,9 +115,9 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
return albumPagesList;
}
- public List<String> getAlbumsFromPage(String url) {
+ private List<String> getAlbumsFromPage(String url) {
List<String> pagesToRip;
- List<String> result = new ArrayList<String>();
+ List<String> result = new ArrayList<>();
logger.info("Running getAlbumsFromPage");
Document doc;
try {
@@ -161,7 +161,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
url_string = url_string.replace("%28", "_");
url_string = url_string.replace("%29", "_");
url_string = url_string.replace("%2C", "_");
- if (isTag == true) {
+ if (isTag) {
logger.info("Downloading from a tag or search");
try {
sleep(500);
@@ -180,11 +180,11 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
return result;
}
- public List<String> getListOfPages(Document doc) {
- List