diff --git a/README.md b/README.md index 5861aef6..05dbd296 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,7 @@ For information about running the `.jar` file, see [the How To Run wiki](https:/ * Built in updater * Can rip images from tumblr in the size they were uploaded in [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#tumblrget_raw_image) * Skips already downloaded images by default +* Can auto skip e-hentai and nhentai albums containing certain tags [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#nhentaiblacklisttags) ## [List of Supported Sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites) diff --git a/build.bat b/build.bat index 7e7c3221..7c2aa6c3 100755 --- a/build.bat +++ b/build.bat @@ -1 +1,2 @@ -mvn clean compile assembly:single \ No newline at end of file +mvn clean compile assembly:single +mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar \ No newline at end of file diff --git a/build.sh b/build.sh index a3ec0242..2f044cde 100755 --- a/build.sh +++ b/build.sh @@ -1,2 +1,4 @@ #!/usr/bin/env bash -mvn clean compile assembly:single \ No newline at end of file +mvn clean compile assembly:single +# Strip the jar of any non-reproducible metadata such as timestamps +mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar \ No newline at end of file diff --git a/patch.py b/patch.py index ca63cbf3..0232ba7d 100644 --- a/patch.py +++ b/patch.py @@ -12,11 +12,13 @@ from hashlib import sha256 message = input('message: ') + def get_ripme_json(): with open('ripme.json') as dataFile: ripmeJson = json.load(dataFile) return ripmeJson + def update_hash(current_hash): ripmeJson = get_ripme_json() with open('ripme.json', 'w') as dataFile: @@ -24,6 +26,7 @@ def update_hash(current_hash): print(ripmeJson["currentHash"]) json.dump(ripmeJson, dataFile, indent=4) + def update_change_list(message): ripmeJson = get_ripme_json() with open('ripme.json', 'w') as dataFile: @@ -72,15 +75,17 @@ dataFile = open("ripme.json", "w") dataFile.write(outputContent) dataFile.close() -subprocess.call(['git', 'add', '-u']) -subprocess.call(['git', 'commit', '-m', commitMessage]) -subprocess.call(['git', 'tag', nextVersion]) print("Building ripme") subprocess.call(["mvn", "clean", "compile", "assembly:single"]) +print("Stripping jar") +subprocess.call(["mvn", "io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar"]) print("Hashing .jar file") openedFile = open("./target/ripme-{}-jar-with-dependencies.jar".format(nextVersion), "rb") readFile = openedFile.read() file_hash = sha256(readFile).hexdigest() print("Hash is: {}".format(file_hash)) print("Updating hash") -update_hash(file_hash) \ No newline at end of file +update_hash(file_hash) +subprocess.call(['git', 'add', '-u']) +subprocess.call(['git', 'commit', '-m', commitMessage]) +subprocess.call(['git', 'tag', nextVersion]) diff --git a/pom.xml b/pom.xml index c94f1d56..81b772ea 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.rarchives.ripme ripme jar - 1.7.50 + 1.7.51 ripme http://rip.rarchives.com @@ -61,6 +61,11 @@ + + io.github.zlika + reproducible-build-maven-plugin + 0.6 + maven-assembly-plugin diff --git a/release.py b/release.py new file mode 100644 index 00000000..3dcd3210 --- /dev/null +++ b/release.py @@ -0,0 +1,96 @@ +#!/usr/bin/python3 + +import re + +import os + +import sys +from hashlib import sha256 +from github import Github +import json +import argparse + +parser = argparse.ArgumentParser(description="Make a new ripme release 
on github") +parser.add_argument("-f", "--file", help="Path to the version of ripme to release") +parser.add_argument("-t", "--token", help="Your github personal access token") +parser.add_argument("-d", "--debug", help="Run in debug mode", action="store_true") +parser.add_argument("-n", "--non-interactive", help="Do not ask for any input from the user", action="store_true") +args = parser.parse_args() + +try: + input = raw_input +except NameError: + pass + + +# Make sure the file the user selected is a jar +def isJar(filename): + if debug: + print("Checking if {} is a jar file".format(filename)) + return filename.endswith("jar") + + +# Returns true if last entry to the "changeList" section of ripme.json is in the format of $number.$number.$number: and +# false if not +def isValidCommitMessage(message): + if debug: + print("Checking if {} matchs pattern ^\d+\.\d+\.\d+:".format(message)) + pattern = re.compile("^\d+\.\d+\.\d+:") + return re.match(pattern, message) + + +ripmeJson = json.loads(open("ripme.json").read()) +fileToUploadPath = args.file +InNoninteractiveMode = args.non_interactive +commitMessage = ripmeJson.get("changeList")[0] +releaseVersion = ripmeJson.get("latestVersion") +debug = args.debug +accessToken = args.token +repoOwner = "ripmeapp" +repoName = "ripme" + +if not os.path.isfile(fileToUploadPath): + print("[!] Error: {} does not exist".format(fileToUploadPath)) + sys.exit(1) + +if not isJar(fileToUploadPath): + print("[!] Error: {} is not a jar file!".format(fileToUploadPath)) + sys.exit(1) + +if not isValidCommitMessage(commitMessage): + print("[!] Error: {} is not a valid commit message as it does not start with a version".format(fileToUploadPath)) + sys.exit(1) + +ripmeUpdate = open(fileToUploadPath, mode='rb').read() + +# The hash that we expect the update to have +expectedHash = ripmeJson.get("currentHash") + +# The actual hash of the file on disk +actualHash = sha256(ripmeUpdate).hexdigest() + +# Make sure that the hash of the file we're uploading matches the hash in ripme.json. These hashes not matching will +# cause ripme to refuse to install the update for all users who haven't disabled update hash checking +if expectedHash != actualHash: + print("[!] Error: expected hash of file and actual hash differ") + print("[!] Expected hash is {}".format(expectedHash)) + print("[!] 
+ +# Ask the user to review the information before we proceed +# This only runs if we're in interactive mode +if not InNoninteractiveMode: + print("File path: {}".format(fileToUploadPath)) + print("Release title: {}".format(commitMessage)) + print("Repo: {}/{}".format(repoOwner, repoName)) + input("\nPlease review the information above and ensure it is correct and then press enter") + +print("Accessing github using token") +g = Github(accessToken) + + +print("Creating release") +release = g.get_user(repoOwner).get_repo(repoName).create_git_release(releaseVersion, commitMessage, "") + +print("Uploading file") +release.upload_asset(fileToUploadPath, "ripme.jar") diff --git a/ripme.json b/ripme.json index b2a8e044..90bde0ce 100644 --- a/ripme.json +++ b/ripme.json @@ -1,6 +1,7 @@ { - "currentHash": "f6e1e6c931abfbeffdd37dabb65f83e4335ca11ccc017f31e1d835ee6e6bec7a", + "currentHash": "aadb71bf5cdf46fe92e270b50a55c8d8d7200a6dd304a4c2ac9f68cddc687d7e", "changeList": [ + "1.7.51: Fixed instagram ripper; Added the ability to rip from vsco profiles; Fixed TheyiffgalleryRipper; Can now update ripme using the -j flag; added script to automate releases; Code style fixes", "1.7.50: Ripme now checks file hash before running update; fixed update bug which cased ripme to report every update as new", "1.7.49: Fixed -n flag; Added ability to change locale at runtime and from gui; Update kr_KR translation; Removed support for tnbtu.com; No longer writes url to url_history file is save urls only is checked", "1.7.48: Fixed instagram ripper; Added Korean translation; Added quickQueue support to nhentairipper; Rewrote nhentairipper to be faster; myhentaicomics ripper now requests proper url when downloading images; Can now include comments in url files; Added the ability to blacklist tags on e-hentai.org", @@ -222,5 +223,5 @@ "1.0.3: Added VK.com ripper", "1.0.1: Added auto-update functionality" ], - "latestVersion": "1.7.50" -} \ No newline at end of file + "latestVersion": "1.7.51" +} diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java index 6a9e62a3..b28b0ba4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java @@ -81,7 +81,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper { public void rip() throws IOException { int index = 0; int textindex = 0; - logger.info("Retrieving " + this.url); + LOGGER.info("Retrieving " + this.url); sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm()); Document doc = getFirstPage(); @@ -117,7 +117,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper { for (String imageURL : imageURLs) { index += 1; - logger.debug("Found image url #" + index + ": " + imageURL); + LOGGER.debug("Found image url #" + index + ": " + imageURL); downloadURL(new URL(imageURL), index); if (isStopped()) { break; @@ -125,16 +125,16 @@ public abstract class AbstractHTMLRipper extends AlbumRipper { } } if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) { - logger.debug("Fetching description(s) from " + doc.location()); + LOGGER.debug("Fetching description(s) from " + doc.location()); List textURLs = getDescriptionsFromPage(doc); if (!textURLs.isEmpty()) { - logger.debug("Found description link(s) from " + doc.location()); + LOGGER.debug("Found description link(s) from " + doc.location()); for (String
textURL : textURLs) { if (isStopped()) { break; } textindex += 1; - logger.debug("Getting description from " + textURL); + LOGGER.debug("Getting description from " + textURL); String[] tempDesc = getDescription(textURL,doc); if (tempDesc != null) { if (Utils.getConfigBoolean("file.overwrite", false) || !(new File( @@ -144,11 +144,11 @@ public abstract class AbstractHTMLRipper extends AlbumRipper { + getPrefix(index) + (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL))) + ".txt").exists())) { - logger.debug("Got description from " + textURL); + LOGGER.debug("Got description from " + textURL); saveText(new URL(textURL), "", tempDesc[0], textindex, (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL)))); sleep(descSleepTime()); } else { - logger.debug("Description from " + textURL + " already exists."); + LOGGER.debug("Description from " + textURL + " already exists."); } } @@ -164,14 +164,14 @@ public abstract class AbstractHTMLRipper extends AlbumRipper { sendUpdate(STATUS.LOADING_RESOURCE, "next page"); doc = getNextPage(doc); } catch (IOException e) { - logger.info("Can't get next page: " + e.getMessage()); + LOGGER.info("Can't get next page: " + e.getMessage()); break; } } // If they're using a thread pool, wait for it. if (getThreadPool() != null) { - logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName()); + LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName()); getThreadPool().waitForThreads(); } waitForThreads(); @@ -237,12 +237,12 @@ public abstract class AbstractHTMLRipper extends AlbumRipper { out.write(text.getBytes()); out.close(); } catch (IOException e) { - logger.error("[!] Error creating save file path for description '" + url + "':", e); + LOGGER.error("[!] Error creating save file path for description '" + url + "':", e); return false; } - logger.debug("Downloading " + url + "'s description to " + saveFileAs); + LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs); if (!saveFileAs.getParentFile().exists()) { - logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); saveFileAs.getParentFile().mkdirs(); } return true; diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java index 93146d4b..291dd7df 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java @@ -50,7 +50,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper { @Override public void rip() throws IOException { int index = 0; - logger.info("Retrieving " + this.url); + LOGGER.info("Retrieving " + this.url); sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm()); JSONObject json = getFirstPage(); @@ -79,7 +79,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper { } index += 1; - logger.debug("Found image url #" + index+ ": " + imageURL); + LOGGER.debug("Found image url #" + index+ ": " + imageURL); downloadURL(new URL(imageURL), index); } @@ -91,14 +91,14 @@ public abstract class AbstractJSONRipper extends AlbumRipper { sendUpdate(STATUS.LOADING_RESOURCE, "next page"); json = getNextPage(json); } catch (IOException e) { - logger.info("Can't get next page: " + e.getMessage()); + LOGGER.info("Can't get next page: " + e.getMessage()); break; } } // If they're using a thread pool, wait for it. 
if (getThreadPool() != null) { - logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName()); + LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName()); getThreadPool().waitForThreads(); } waitForThreads(); diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java index dc04e801..87d8bd46 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java @@ -27,7 +27,7 @@ public abstract class AbstractRipper extends Observable implements RipperInterface, Runnable { - protected static final Logger logger = Logger.getLogger(AbstractRipper.class); + protected static final Logger LOGGER = Logger.getLogger(AbstractRipper.class); private final String URLHistoryFile = Utils.getURLHistoryFile(); public static final String USER_AGENT = @@ -77,11 +77,11 @@ public abstract class AbstractRipper try { File file = new File(URLHistoryFile); if (!new File(Utils.getConfigDir()).exists()) { - logger.error("Config dir doesn't exist"); - logger.info("Making config dir"); + LOGGER.error("Config dir doesn't exist"); + LOGGER.info("Making config dir"); boolean couldMakeDir = new File(Utils.getConfigDir()).mkdirs(); if (!couldMakeDir) { - logger.error("Couldn't make config dir"); + LOGGER.error("Couldn't make config dir"); return; } } @@ -89,12 +89,12 @@ public abstract class AbstractRipper if (!file.exists()) { boolean couldMakeDir = file.createNewFile(); if (!couldMakeDir) { - logger.error("Couldn't url history file"); + LOGGER.error("Couldn't create url history file"); return; } } if (!file.canWrite()) { - logger.error("Can't write to url history file: " + URLHistoryFile); + LOGGER.error("Can't write to url history file: " + URLHistoryFile); return; } fw = new FileWriter(file.getAbsoluteFile(), true); @@ -247,10 +247,10 @@ public abstract class AbstractRipper try { stopCheck(); } catch (IOException e) { - logger.debug("Ripper has been stopped"); + LOGGER.debug("Ripper has been stopped"); return false; } - logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName); + LOGGER.debug("url: " + url + ", prefix: " + prefix + ", subdirectory: " + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName); String saveAs = getFileName(url, fileName, extension); File saveFileAs; try { @@ -265,19 +265,19 @@ + prefix + saveAs); } catch (IOException e) { - logger.error("[!] Error creating save file path for URL '" + url + "':", e); + LOGGER.error("[!] 
Error creating save file path for URL '" + url + "':", e); return false; } - logger.debug("Downloading " + url + " to " + saveFileAs); + LOGGER.debug("Downloading " + url + " to " + saveFileAs); if (!saveFileAs.getParentFile().exists()) { - logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); saveFileAs.getParentFile().mkdirs(); } if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) { try { writeDownloadedURL(url.toExternalForm() + "\n"); } catch (IOException e) { - logger.debug("Unable to write URL history file"); + LOGGER.debug("Unable to write URL history file"); } } return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME); @@ -357,7 +357,7 @@ public abstract class AbstractRipper * Waits for downloading threads to complete. */ protected void waitForThreads() { - logger.debug("Waiting for threads to finish"); + LOGGER.debug("Waiting for threads to finish"); completed = false; threadPool.waitForThreads(); checkIfComplete(); @@ -409,13 +409,13 @@ public abstract class AbstractRipper */ void checkIfComplete() { if (observer == null) { - logger.debug("observer is null"); + LOGGER.debug("observer is null"); return; } if (!completed) { completed = true; - logger.info(" Rip completed!"); + LOGGER.info(" Rip completed!"); RipStatusComplete rsc = new RipStatusComplete(workingDir, getCount()); RipStatusMessage msg = new RipStatusMessage(STATUS.RIP_COMPLETE, rsc); @@ -424,7 +424,7 @@ public abstract class AbstractRipper Logger rootLogger = Logger.getRootLogger(); FileAppender fa = (FileAppender) rootLogger.getAppender("FILE"); if (fa != null) { - logger.debug("Changing log file back to 'ripme.log'"); + LOGGER.debug("Changing log file back to 'ripme.log'"); fa.setFile("ripme.log"); fa.activateOptions(); } @@ -433,7 +433,7 @@ public abstract class AbstractRipper try { Desktop.getDesktop().open(new File(urlFile)); } catch (IOException e) { - logger.warn("Error while opening " + urlFile, e); + LOGGER.warn("Error while opening " + urlFile, e); } } } @@ -488,7 +488,7 @@ public abstract class AbstractRipper for (Constructor constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers")) { try { AlbumRipper ripper = (AlbumRipper) constructor.newInstance(url); // by design: can throw ClassCastException - logger.debug("Found album ripper: " + ripper.getClass().getName()); + LOGGER.debug("Found album ripper: " + ripper.getClass().getName()); return ripper; } catch (Exception e) { // Incompatible rippers *will* throw exceptions during instantiation. @@ -497,7 +497,7 @@ public abstract class AbstractRipper for (Constructor constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers.video")) { try { VideoRipper ripper = (VideoRipper) constructor.newInstance(url); // by design: can throw ClassCastException - logger.debug("Found video ripper: " + ripper.getClass().getName()); + LOGGER.debug("Found video ripper: " + ripper.getClass().getName()); return ripper; } catch (Exception e) { // Incompatible rippers *will* throw exceptions during instantiation. 
@@ -554,11 +554,11 @@ public abstract class AbstractRipper try { rip(); } catch (HttpStatusException e) { - logger.error("Got exception while running ripper:", e); + LOGGER.error("Got exception while running ripper:", e); waitForThreads(); sendUpdate(STATUS.RIP_ERRORED, "HTTP status code " + e.getStatusCode() + " for URL " + e.getUrl()); } catch (Exception e) { - logger.error("Got exception while running ripper:", e); + LOGGER.error("Got exception while running ripper:", e); waitForThreads(); sendUpdate(STATUS.RIP_ERRORED, e.getMessage()); } finally { @@ -571,10 +571,10 @@ public abstract class AbstractRipper private void cleanup() { if (this.workingDir.list().length == 0) { // No files, delete the dir - logger.info("Deleting empty directory " + this.workingDir); + LOGGER.info("Deleting empty directory " + this.workingDir); boolean deleteResult = this.workingDir.delete(); if (!deleteResult) { - logger.error("Unable to delete empty directory " + this.workingDir); + LOGGER.error("Unable to delete empty directory " + this.workingDir); } } } @@ -589,11 +589,11 @@ public abstract class AbstractRipper */ protected boolean sleep(int milliseconds) { try { - logger.debug("Sleeping " + milliseconds + "ms"); + LOGGER.debug("Sleeping " + milliseconds + "ms"); Thread.sleep(milliseconds); return true; } catch (InterruptedException e) { - logger.error("Interrupted while waiting to load next page", e); + LOGGER.error("Interrupted while waiting to load next page", e); return false; } } @@ -607,7 +607,7 @@ public abstract class AbstractRipper /** Methods for detecting when we're running a test. */ public void markAsTest() { - logger.debug("THIS IS A TEST RIP"); + LOGGER.debug("THIS IS A TEST RIP"); thisIsATest = true; } protected boolean isThisATest() { diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java index b037052e..977de15d 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java @@ -62,7 +62,7 @@ public abstract class AlbumRipper extends AbstractRipper { || itemsCompleted.containsKey(url) || itemsErrored.containsKey(url) )) { // Item is already downloaded/downloading, skip it. - logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs)); + LOGGER.info("[!] 
Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs)); return false; } if (Utils.getConfigBoolean("urls_only.save", false)) { @@ -76,7 +76,7 @@ public abstract class AlbumRipper extends AbstractRipper { itemsCompleted.put(url, new File(urlFile)); observer.update(this, msg); } catch (IOException e) { - logger.error("Error while writing to " + urlFile, e); + LOGGER.error("Error while writing to " + urlFile, e); } } else { @@ -128,7 +128,7 @@ public abstract class AlbumRipper extends AbstractRipper { checkIfComplete(); } catch (Exception e) { - logger.error("Exception while updating observer: ", e); + LOGGER.error("Exception while updating observer: ", e); } } @@ -196,7 +196,7 @@ public abstract class AlbumRipper extends AbstractRipper { } else { title = super.getAlbumTitle(this.url); } - logger.debug("Using album title '" + title + "'"); + LOGGER.debug("Using album title '" + title + "'"); title = Utils.filesystemSafe(title); path += title; @@ -204,10 +204,10 @@ public abstract class AlbumRipper extends AbstractRipper { this.workingDir = new File(path); if (!this.workingDir.exists()) { - logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir)); + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir)); this.workingDir.mkdirs(); } - logger.debug("Set working directory to: " + this.workingDir); + LOGGER.debug("Set working directory to: " + this.workingDir); } /** diff --git a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java index 6b1032e5..4fb0f32a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java @@ -1,5 +1,9 @@ package com.rarchives.ripme.ripper; +import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.ui.RipStatusMessage.STATUS; +import com.rarchives.ripme.utils.Utils; + import java.io.File; import java.io.FileWriter; import java.io.IOException; @@ -7,29 +11,27 @@ import java.net.MalformedURLException; import java.net.URL; import java.util.Map; -import com.rarchives.ripme.ui.RipStatusMessage; -import com.rarchives.ripme.ui.RipStatusMessage.STATUS; -import com.rarchives.ripme.utils.Utils; -import com.sun.org.apache.xpath.internal.operations.Bool; public abstract class VideoRipper extends AbstractRipper { - private int bytesTotal = 1, - bytesCompleted = 1; + private int bytesTotal = 1; + private int bytesCompleted = 1; protected VideoRipper(URL url) throws IOException { super(url); } - public abstract boolean canRip(URL url); public abstract void rip() throws IOException; + public abstract String getHost(); + public abstract String getGID(URL url) throws MalformedURLException; @Override public void setBytesTotal(int bytes) { this.bytesTotal = bytes; } + @Override public void setBytesCompleted(int bytes) { this.bytesCompleted = bytes; @@ -53,15 +55,14 @@ public abstract class VideoRipper extends AbstractRipper { RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, urlFile); observer.update(this, msg); } catch (IOException e) { - logger.error("Error while writing to " + urlFile, e); + LOGGER.error("Error while writing to " + urlFile, e); return false; } - } - else { + } else { if (isThisATest()) { // Tests shouldn't download the whole video // Just change this.url to the download URL so the test knows we found it. 
- logger.debug("Test rip, found URL: " + url); + LOGGER.debug("Test rip, found URL: " + url); this.url = url; return true; } @@ -71,52 +72,54 @@ public abstract class VideoRipper extends AbstractRipper { } @Override - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { return addURLToDownload(url, saveAs); } - /** * Creates & sets working directory based on URL. - * @param url - * Target URL + * + * @param url Target URL */ @Override public void setWorkingDir(URL url) throws IOException { String path = Utils.getWorkingDirectory().getCanonicalPath(); + if (!path.endsWith(File.separator)) { path += File.separator; } + path += "videos" + File.separator; - this.workingDir = new File(path); - if (!this.workingDir.exists()) { - logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir)); - this.workingDir.mkdirs(); + workingDir = new File(path); + + if (!workingDir.exists()) { + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(workingDir)); + workingDir.mkdirs(); } - logger.debug("Set working directory to: " + this.workingDir); + + LOGGER.debug("Set working directory to: " + workingDir); } - + /** - * @return - * Returns % of video done downloading. + * @return Returns % of video done downloading. */ @Override public int getCompletionPercentage() { return (int) (100 * (bytesCompleted / (float) bytesTotal)); } - + /** * Runs if download successfully completed. - * @param url - * Target URL - * @param saveAs - * Path to file, including filename. + * + * @param url Target URL + * @param saveAs Path to file, including filename. */ @Override public void downloadCompleted(URL url, File saveAs) { if (observer == null) { return; } + try { String path = Utils.removeCWD(saveAs); RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, path); @@ -124,65 +127,61 @@ public abstract class VideoRipper extends AbstractRipper { checkIfComplete(); } catch (Exception e) { - logger.error("Exception while updating observer: ", e); + LOGGER.error("Exception while updating observer: ", e); } } - + /** * Runs if the download errored somewhere. - * @param url - * Target URL - * @param reason - * Reason why the download failed. + * + * @param url Target URL + * @param reason Reason why the download failed. */ @Override public void downloadErrored(URL url, String reason) { if (observer == null) { return; } + observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_ERRORED, url + " : " + reason)); checkIfComplete(); } - - + /** * Runs if user tries to redownload an already existing File. - * @param url - * Target URL - * @param file - * Existing file + * + * @param url Target URL + * @param file Existing file */ @Override public void downloadExists(URL url, File file) { if (observer == null) { return; } + observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file)); checkIfComplete(); } /** * Gets the status and changes it to a human-readable form. - * @return - * Status of current download. + * + * @return Status of current download. 
*/ @Override public String getStatusText() { - StringBuilder sb = new StringBuilder(); - sb.append(getCompletionPercentage()) - .append("% ") - .append(" - ") - .append(Utils.bytesToHumanReadable(bytesCompleted)) - .append(" / ") - .append(Utils.bytesToHumanReadable(bytesTotal)); - return sb.toString(); + return String.valueOf(getCompletionPercentage()) + + "% - " + + Utils.bytesToHumanReadable(bytesCompleted) + + " / " + + Utils.bytesToHumanReadable(bytesTotal); } - @Override /** * Sanitizes URL. * Usually just returns itself. */ + @Override public URL sanitizeURL(URL url) throws MalformedURLException { return url; } @@ -195,8 +194,10 @@ public abstract class VideoRipper extends AbstractRipper { if (observer == null) { return; } + if (bytesCompleted >= bytesTotal) { super.checkIfComplete(); } } -} + +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java index 74504b12..a11b08a4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java @@ -9,7 +9,6 @@ import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.jsoup.Connection.Response; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; @@ -58,7 +57,7 @@ public class AerisdiesRipper extends AbstractHTMLRipper { return getHost() + "_" + getGID(url) + "_" + title.trim(); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java index 5978bff5..af4c0c1f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java @@ -76,7 +76,7 @@ public class BatoRipper extends AbstractHTMLRipper { return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_"); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } @@ -113,7 +113,7 @@ public class BatoRipper extends AbstractHTMLRipper { s = s.replaceAll("var prevCha = null;", ""); s = s.replaceAll("var nextCha = \\.*;", ""); String json = s.replaceAll("var images = ", "").replaceAll(";", ""); - logger.info(s); + LOGGER.info(s); JSONObject images = new JSONObject(json); for (int i = 1; i < images.length() +1; i++) { result.add(images.getString(Integer.toString(i))); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java index ace305c1..9076ef1f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java @@ -68,11 +68,11 @@ public class ChanRipper extends AbstractHTMLRipper { String subject = doc.select(".post.op > .postinfo > .subject").first().text(); return getHost() + "_" + getGID(url) + "_" + subject; } catch (NullPointerException e) { - logger.warn("Failed to get thread title from " + url); + LOGGER.warn("Failed to get thread title from " + 
url); } } catch (Exception e) { // Fall back to default album naming convention - logger.warn("Failed to get album title from " + url, e); + LOGGER.warn("Failed to get album title from " + url, e); } // Fall back on the GID return getHost() + "_" + getGID(url); @@ -144,7 +144,7 @@ public class ChanRipper extends AbstractHTMLRipper { private boolean isURLBlacklisted(String url) { for (String blacklist_item : url_piece_blacklist) { if (url.contains(blacklist_item)) { - logger.debug("Skipping link that contains '"+blacklist_item+"': " + url); + LOGGER.debug("Skipping link that contains '"+blacklist_item+"': " + url); return true; } } @@ -185,7 +185,7 @@ public class ChanRipper extends AbstractHTMLRipper { } // Don't download the same URL twice if (imageURLs.contains(href)) { - logger.debug("Already attempted: " + href); + LOGGER.debug("Already attempted: " + href); continue; } imageURLs.add(href); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java index 160febfc..cb0b765f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java @@ -63,7 +63,7 @@ public class CheveretoRipper extends AbstractHTMLRipper { return getHost() + "_" + title.trim(); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java index aac968b4..99942adb 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java @@ -122,14 +122,14 @@ public class DeviantartRipper extends AbstractHTMLRipper { String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz"))); if (username == null || password == null) { - logger.debug("No DeviantArt login provided."); + LOGGER.debug("No DeviantArt login provided."); cookies.put("agegate_state","1"); // Bypasses the age gate } else { // Attempt Login try { cookies = loginToDeviantart(); } catch (IOException e) { - logger.warn("Failed to login: ", e); + LOGGER.warn("Failed to login: ", e); cookies.put("agegate_state","1"); // Bypasses the age gate } } @@ -161,7 +161,7 @@ public class DeviantartRipper extends AbstractHTMLRipper { script = script.substring(script.indexOf("},\"src\":\"") + 9, script.indexOf("\",\"type\"")); return script.replace("\\/", "/"); } catch (StringIndexOutOfBoundsException e) { - logger.debug("Unable to get json link from " + page.location()); + LOGGER.debug("Unable to get json link from " + page.location()); } } } @@ -204,7 +204,7 @@ public class DeviantartRipper extends AbstractHTMLRipper { } } if (triedURLs.contains(fullSize)) { - logger.warn("Already tried to download " + fullSize); + LOGGER.warn("Already tried to download " + fullSize); continue; } triedURLs.add(fullSize); @@ -222,7 +222,7 @@ public class DeviantartRipper extends AbstractHTMLRipper { List textURLs = new ArrayList<>(); // Iterate over all thumbnails for (Element thumb : page.select("div.zones-container span.thumb")) { - logger.info(thumb.attr("href")); + LOGGER.info(thumb.attr("href")); if (isStopped()) { break; } @@ -256,7 +256,7 @@ public class DeviantartRipper 
extends AbstractHTMLRipper { if (!sleep(PAGE_SLEEP_TIME)) { throw new IOException("Interrupted while waiting to load next page: " + nextPage); } - logger.info("Found next page: " + nextPage); + LOGGER.info("Found next page: " + nextPage); return Http.url(nextPage) .cookies(cookies) .get(); @@ -351,7 +351,7 @@ public class DeviantartRipper extends AbstractHTMLRipper { return new String[] {Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)),fullSize}; // TODO Make this not make a newline if someone just types \n into the description. } catch (IOException ioe) { - logger.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'"); + LOGGER.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'"); return null; } } @@ -379,7 +379,7 @@ public class DeviantartRipper extends AbstractHTMLRipper { if (!els.isEmpty()) { // Large image fsimage = els.get(0).attr("src"); - logger.info("Found large-scale: " + fsimage); + LOGGER.info("Found large-scale: " + fsimage); if (fsimage.contains("//orig")) { return fsimage; } @@ -389,7 +389,7 @@ public class DeviantartRipper extends AbstractHTMLRipper { if (!els.isEmpty()) { // Full-size image String downloadLink = els.get(0).attr("href"); - logger.info("Found download button link: " + downloadLink); + LOGGER.info("Found download button link: " + downloadLink); HttpURLConnection con = (HttpURLConnection) new URL(downloadLink).openConnection(); con.setRequestProperty("Referer",this.url.toString()); String cookieString = ""; @@ -406,7 +406,7 @@ public class DeviantartRipper extends AbstractHTMLRipper { con.disconnect(); if (location.contains("//orig")) { fsimage = location; - logger.info("Found image download: " + location); + LOGGER.info("Found image download: " + location); } } if (fsimage != null) { @@ -415,9 +415,9 @@ public class DeviantartRipper extends AbstractHTMLRipper { throw new IOException("No download page found"); } catch (IOException ioe) { try { - logger.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'"); + LOGGER.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'"); String lessThanFull = thumbToFull(thumb, false); - logger.info("Falling back to less-than-full-size image " + lessThanFull); + LOGGER.info("Falling back to less-than-full-size image " + lessThanFull); return lessThanFull; } catch (Exception e) { return null; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java index 43728fb4..69d778cf 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java @@ -70,7 +70,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { return getHost() + "_" + elems.first().text(); } catch (Exception e) { // Fall back to default album naming convention - logger.warn("Failed to get album title from " + url, e); + LOGGER.warn("Failed to get album title from " + url, e); } return super.getAlbumTitle(url); } @@ -103,7 +103,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { int retries = 3; while (true) { sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); - logger.info("Retrieving " + url); + LOGGER.info("Retrieving " + url); doc = Http.url(url) .referrer(this.url) .cookies(cookies) @@ -112,7 +112,7 @@ public class EHentaiRipper extends 
AbstractHTMLRipper { if (retries == 0) { throw new IOException("Hit rate limit and maximum number of retries, giving up"); } - logger.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining"); + LOGGER.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining"); retries--; try { Thread.sleep(IP_BLOCK_SLEEP_TIME); @@ -137,7 +137,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { if (blackListedTags == null) { return null; } - logger.info("Blacklisted tags " + blackListedTags[0]); + LOGGER.info("Blacklisted tags " + blackListedTags[0]); List tagsOnPage = getTags(doc); for (String tag : blackListedTags) { for (String pageTag : tagsOnPage) { @@ -153,9 +153,9 @@ public class EHentaiRipper extends AbstractHTMLRipper { private List getTags(Document doc) { List tags = new ArrayList<>(); - logger.info("Getting tags"); + LOGGER.info("Getting tags"); for (Element tag : doc.select("td > div > a")) { - logger.info("Found tag " + tag.text()); + LOGGER.info("Found tag " + tag.text()); tags.add(tag.text()); } return tags; @@ -168,7 +168,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { albumDoc = getPageWithRetries(this.url); } this.lastURL = this.url.toExternalForm(); - logger.info("Checking blacklist"); + LOGGER.info("Checking blacklist"); String blacklistedTag = checkTags(albumDoc, Utils.getConfigStringArray("ehentai.blacklist.tags")); if (blacklistedTag != null) { sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " + @@ -187,13 +187,13 @@ public class EHentaiRipper extends AbstractHTMLRipper { // Find next page Elements hrefs = doc.select(".ptt a"); if (hrefs.isEmpty()) { - logger.info("doc: " + doc.html()); + LOGGER.info("doc: " + doc.html()); throw new IOException("No navigation links found"); } // Ensure next page is different from the current page String nextURL = hrefs.last().attr("href"); if (nextURL.equals(this.lastURL)) { - logger.info("lastURL = nextURL : " + nextURL); + LOGGER.info("lastURL = nextURL : " + nextURL); throw new IOException("Reached last page of results"); } // Sleep before loading next page @@ -223,7 +223,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { Thread.sleep(IMAGE_SLEEP_TIME); } catch (InterruptedException e) { - logger.warn("Interrupted while waiting to load next image", e); + LOGGER.warn("Interrupted while waiting to load next image", e); } } @@ -259,13 +259,13 @@ public class EHentaiRipper extends AbstractHTMLRipper { // Attempt to find image elsewise (Issue #41) images = doc.select("img#img"); if (images.isEmpty()) { - logger.warn("Image not found at " + this.url); + LOGGER.warn("Image not found at " + this.url); return; } } Element image = images.first(); String imgsrc = image.attr("src"); - logger.info("Found URL " + imgsrc + " via " + images.get(0)); + LOGGER.info("Found URL " + imgsrc + " via " + images.get(0)); Pattern p = Pattern.compile("^http://.*/ehg/image.php.*&n=([^&]+).*$"); Matcher m = p.matcher(imgsrc); if (m.matches()) { @@ -286,7 +286,7 @@ public class EHentaiRipper extends AbstractHTMLRipper { addURLToDownload(new URL(imgsrc), prefix); } } catch (IOException e) { - logger.error("[!] Exception while loading/parsing " + this.url, e); + LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java index 3e06695b..13a22213 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java @@ -70,7 +70,7 @@ public class EightmusesRipper extends AbstractHTMLRipper { return getHost() + "_" + title.trim(); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } @@ -96,19 +96,19 @@ public class EightmusesRipper extends AbstractHTMLRipper { if (thumb.attr("href").contains("/comics/album/")) { String subUrl = "https://www.8muses.com" + thumb.attr("href"); try { - logger.info("Retrieving " + subUrl); + LOGGER.info("Retrieving " + subUrl); sendUpdate(STATUS.LOADING_RESOURCE, subUrl); Document subPage = Http.url(subUrl).get(); // If the page below this one has images this line will download them List subalbumImages = getURLsFromPage(subPage); - logger.info("Found " + subalbumImages.size() + " images in subalbum"); + LOGGER.info("Found " + subalbumImages.size() + " images in subalbum"); } catch (IOException e) { - logger.warn("Error while loading subalbum " + subUrl, e); + LOGGER.warn("Error while loading subalbum " + subUrl, e); } } else if (thumb.attr("href").contains("/comics/picture/")) { - logger.info("This page is a album"); - logger.info("Ripping image"); + LOGGER.info("This page is an album"); + LOGGER.info("Ripping image"); if (super.isStopped()) break; // Find thumbnail image source String image = null; @@ -122,7 +122,7 @@ public class EightmusesRipper extends AbstractHTMLRipper { imageHref = "https://www.8muses.com" + imageHref; } try { - logger.info("Retrieving full-size image location from " + imageHref); + LOGGER.info("Retrieving full-size image location from " + imageHref); image = getFullSizeImage(imageHref); URL imageUrl = new URL(image); if (Utils.getConfigBoolean("8muses.use_short_names", false)) { @@ -134,7 +134,7 @@ x++; } catch (IOException e) { - logger.error("Failed to get full-size image from " + imageHref); + LOGGER.error("Failed to get full-size image from " + imageHref); continue; } } @@ -152,7 +152,7 @@ public class EightmusesRipper extends AbstractHTMLRipper { private String getFullSizeImage(String imageUrl) throws IOException { sendUpdate(STATUS.LOADING_RESOURCE, imageUrl); - logger.info("Getting full sized image from " + imageUrl); + LOGGER.info("Getting full sized image from " + imageUrl); Document doc = new Http(imageUrl).get(); // Retrieve the webpage of the image URL String imageName = doc.select("input[id=imageName]").attr("value"); // Select the "input" element from the page return "https://www.8muses.com/image/fm/" + imageName; @@ -166,14 +166,14 @@ } private String getSubdir(String rawHref) { - logger.info("Raw title: " + rawHref); + LOGGER.info("Raw title: " + rawHref); String title = rawHref; title = title.replaceAll("8muses - Sex and Porn Comics", ""); title = title.replaceAll("\t\t", ""); title = title.replaceAll("\n", ""); title = title.replaceAll("\\| ", ""); title = title.replace(" ", "-"); - logger.info(title); + LOGGER.info(title); return title; } diff --git 
a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java index 737b8092..d64e9600 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java @@ -103,7 +103,7 @@ public class EroShareRipper extends AbstractHTMLRipper { return getHost() + "_" + getGID(url) + "_" + title.trim(); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } @@ -139,7 +139,7 @@ public class EroShareRipper extends AbstractHTMLRipper { try { video_page = Http.url("eroshae.com" + link.attr("href")).get(); } catch (IOException e) { - logger.warn("Failed to log link in Jsoup"); + LOGGER.warn("Failed to load link in Jsoup"); video_page = null; e.printStackTrace(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java index 84e63e76..a01c134a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java @@ -52,7 +52,7 @@ public class EromeRipper extends AbstractHTMLRipper { return getHost() + "_" + getGID(url) + "_" + title.trim(); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java index 4ade270b..6591dd01 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java @@ -153,7 +153,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper { /** Convert username to UserID. 
*/ private String getUserID(String username) throws IOException { - logger.info("Fetching user ID for " + username); + LOGGER.info("Fetching user ID for " + username); JSONObject json = new Http("https://api.500px.com/v1/" + "users/show" + "?username=" + username + @@ -165,7 +165,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper { @Override public JSONObject getFirstPage() throws IOException { URL apiURL = new URL(baseURL + "&consumer_key=" + CONSUMER_KEY); - logger.debug("apiURL: " + apiURL); + LOGGER.debug("apiURL: " + apiURL); JSONObject json = Http.url(apiURL).getJSON(); if (baseURL.contains("/galleries?")) { @@ -185,7 +185,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper { + "?rpp=100" + "&image_size=5" + "&consumer_key=" + CONSUMER_KEY; - logger.info("Loading " + blogURL); + LOGGER.info("Loading " + blogURL); sendUpdate(STATUS.LOADING_RESOURCE, "Gallery ID " + galleryID + " for userID " + userID); JSONObject thisJSON = Http.url(blogURL).getJSON(); JSONArray thisPhotos = thisJSON.getJSONArray("photos"); @@ -216,7 +216,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper { + "&rpp=100" + "&image_size=5" + "&consumer_key=" + CONSUMER_KEY; - logger.info("Loading " + blogURL); + LOGGER.info("Loading " + blogURL); sendUpdate(STATUS.LOADING_RESOURCE, "Story ID " + blogid + " for user " + username); JSONObject thisJSON = Http.url(blogURL).getJSON(); JSONArray thisPhotos = thisJSON.getJSONArray("photos"); @@ -268,20 +268,20 @@ public class FivehundredpxRipper extends AbstractJSONRipper { Document doc; Elements images = new Elements(); try { - logger.debug("Loading " + rawUrl); + LOGGER.debug("Loading " + rawUrl); super.retrievingSource(rawUrl); doc = Http.url(rawUrl).get(); images = doc.select("div#preload img"); } catch (IOException e) { - logger.error("Error fetching full-size image from " + rawUrl, e); + LOGGER.error("Error fetching full-size image from " + rawUrl, e); } if (!images.isEmpty()) { imageURL = images.first().attr("src"); - logger.debug("Found full-size non-watermarked image: " + imageURL); + LOGGER.debug("Found full-size non-watermarked image: " + imageURL); } else { - logger.debug("Falling back to image_url from API response"); + LOGGER.debug("Falling back to image_url from API response"); imageURL = photo.getString("image_url"); imageURL = imageURL.replaceAll("/4\\.", "/5."); // See if there's larger images @@ -289,14 +289,14 @@ public class FivehundredpxRipper extends AbstractJSONRipper { String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + "."); sleep(10); if (urlExists(fsURL)) { - logger.info("Found larger image at " + fsURL); + LOGGER.info("Found larger image at " + fsURL); imageURL = fsURL; break; } } } if (imageURL == null) { - logger.error("Failed to find image for photo " + photo.toString()); + LOGGER.error("Failed to find image for photo " + photo.toString()); } else { imageURLs.add(imageURL); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java index a1a1c2b8..10e786d3 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java @@ -251,7 +251,7 @@ public class FlickrRipper extends AbstractHTMLRipper { Document doc = getLargestImagePageDocument(this.url); Elements fullsizeImages = doc.select("div#allsizes-photo img"); if (fullsizeImages.isEmpty()) { - logger.error("Could not find flickr image at " + doc.location() + " - missing 
'div#allsizes-photo img'"); + LOGGER.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'"); } else { String prefix = ""; @@ -263,7 +263,7 @@ public class FlickrRipper extends AbstractHTMLRipper { } } } catch (IOException e) { - logger.error("[!] Exception while loading/parsing " + this.url, e); + LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java index ec8fc5cf..440e9ae5 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java @@ -81,7 +81,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper { private String getImageFromPost(String url) { try { - logger.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content")); + LOGGER.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content")); return Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content"); } catch (IOException e) { return ""; @@ -103,7 +103,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper { Elements urlElements = page.select("figure.t-image > b > u > a"); for (Element e : urlElements) { urls.add(urlBase + e.select("a").first().attr("href")); - logger.debug("Desc2 " + urlBase + e.select("a").first().attr("href")); + LOGGER.debug("Desc2 " + urlBase + e.select("a").first().attr("href")); } return urls; } @@ -122,21 +122,21 @@ public class FuraffinityRipper extends AbstractHTMLRipper { // Try to find the description Elements els = resp.parse().select("td[class=alt1][width=\"70%\"]"); if (els.isEmpty()) { - logger.debug("No description at " + page); + LOGGER.debug("No description at " + page); throw new IOException("No description found"); } - logger.debug("Description found!"); + LOGGER.debug("Description found!"); Document documentz = resp.parse(); Element ele = documentz.select("td[class=alt1][width=\"70%\"]").get(0); // This is where the description is. // Would break completely if FurAffinity changed site layout. documentz.outputSettings(new Document.OutputSettings().prettyPrint(false)); ele.select("br").append("\\n"); ele.select("p").prepend("\\n\\n"); - logger.debug("Returning description at " + page); + LOGGER.debug("Returning description at " + page); String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)); return documentz.select("meta[property=og:title]").attr("content") + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name. } catch (IOException ioe) { - logger.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'"); + LOGGER.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'"); return null; } } @@ -171,12 +171,12 @@ public class FuraffinityRipper extends AbstractHTMLRipper { out.write(text.getBytes()); out.close(); } catch (IOException e) { - logger.error("[!] Error creating save file path for description '" + url + "':", e); + LOGGER.error("[!] 
Error creating save file path for description '" + url + "':", e); return false; } - logger.debug("Downloading " + url + "'s description to " + saveFileAs); + LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs); if (!saveFileAs.getParentFile().exists()) { - logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); saveFileAs.getParentFile().mkdirs(); } return true; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java index 5ebb7297..45ce2b92 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java @@ -67,7 +67,7 @@ public class FuskatorRipper extends AbstractHTMLRipper { try { baseUrl = URLDecoder.decode(baseUrl, "UTF-8"); } catch (UnsupportedEncodingException e) { - logger.warn("Error while decoding " + baseUrl, e); + LOGGER.warn("Error while decoding " + baseUrl, e); } if (baseUrl.startsWith("//")) { baseUrl = "http:" + baseUrl; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java index 271d0313..2afc79d1 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java @@ -40,7 +40,7 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper { return getHost() + "_" + elems.first().text(); } catch (Exception e) { // Fall back to default album naming convention - logger.warn("Failed to get album title from " + url, e); + LOGGER.warn("Failed to get album title from " + url, e); } return super.getAlbumTitle(url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java index d45cbed5..a3403b9a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java @@ -56,7 +56,7 @@ public class HbrowseRipper extends AbstractHTMLRipper { return getHost() + "_" + title + "_" + getGID(url); } catch (Exception e) { // Fall back to default album naming convention - logger.warn("Failed to get album title from " + url, e); + LOGGER.warn("Failed to get album title from " + url, e); } return super.getAlbumTitle(url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java index e0dbff17..cb521523 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java @@ -38,7 +38,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper { @Override public boolean pageContainsAlbums(URL url) { - logger.info("Page contains albums"); + LOGGER.info("Page contains albums"); Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?"); Matcher mat = pat.matcher(url.toExternalForm()); if (mat.matches()) { @@ -95,7 +95,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper { return getHost() + "_" + getGID(url); } catch (Exception e) { // Fall back to default album naming convention - logger.warn("Failed to get album title from " + url, e); + LOGGER.warn("Failed to get album 
title from " + url, e); } return super.getAlbumTitle(url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java index a81b47f9..33a5a964 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java @@ -98,7 +98,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper { cookies.putAll(resp.cookies()); } else { - logger.info("unable to find csrf_token and set filter"); + LOGGER.info("unable to find csrf_token and set filter"); } resp = Http.url(url) @@ -139,19 +139,19 @@ public class HentaifoundryRipper extends AbstractHTMLRipper { } Matcher imgMatcher = imgRegex.matcher(thumb.attr("href")); if (!imgMatcher.matches()) { - logger.info("Couldn't find user & image ID in " + thumb.attr("href")); + LOGGER.info("Couldn't find user & image ID in " + thumb.attr("href")); continue; } Document imagePage; try { - logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href")); + LOGGER.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href")); imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get(); } catch (IOException e) { - logger.debug(e.getMessage()); - logger.debug("Warning: imagePage is null!"); + LOGGER.debug(e.getMessage()); + LOGGER.debug("Warning: imagePage is null!"); imagePage = null; } // This is here for when the image is resized to a thumbnail because ripme doesn't report a screensize diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java index 3e92cc61..c5f0bbd4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java @@ -9,9 +9,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import org.json.JSONArray; -import org.json.JSONObject; import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; @@ -57,7 +55,7 @@ public class HitomiRipper extends AbstractHTMLRipper { public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); String json = doc.text().replaceAll("var galleryinfo =", ""); - logger.info(json); + LOGGER.info(json); JSONArray json_data = new JSONArray(json); for (int i = 0; i < json_data.length(); i++) { result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name")); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java index 1eabefb9..5b481258 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java @@ -14,8 +14,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; -import javax.print.Doc; - public class HypnohubRipper extends AbstractHTMLRipper { public HypnohubRipper(URL url) throws IOException { @@ -55,14 +53,14 @@ public class HypnohubRipper extends AbstractHTMLRipper { } private String ripPost(String url) throws IOException { - logger.info(url); + LOGGER.info(url); Document doc = Http.url(url).get(); return "https:" + 
doc.select("img.image").attr("src"); } private String ripPost(Document doc) { - logger.info(url); + LOGGER.info(url); return "https:" + doc.select("img.image").attr("src"); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java index b33f5624..3aca67cf 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java @@ -99,16 +99,16 @@ public class ImagebamRipper extends AbstractHTMLRipper { // Attempt to use album title as GID Elements elems = getFirstPage().select("legend"); String title = elems.first().text(); - logger.info("Title text: '" + title + "'"); + LOGGER.info("Title text: '" + title + "'"); Pattern p = Pattern.compile("^(.*)\\s\\d* image.*$"); Matcher m = p.matcher(title); if (m.matches()) { return getHost() + "_" + getGID(url) + " (" + m.group(1).trim() + ")"; } - logger.info("Doesn't match " + p.pattern()); + LOGGER.info("Doesn't match " + p.pattern()); } catch (Exception e) { // Fall back to default album naming convention - logger.warn("Failed to get album title from " + url, e); + LOGGER.warn("Failed to get album title from " + url, e); } return super.getAlbumTitle(url); } @@ -148,14 +148,14 @@ public class ImagebamRipper extends AbstractHTMLRipper { //the direct link to the image seems to always be linked in the <meta> part of the html. if (metaTag.attr("property").equals("og:image")) { imgsrc = metaTag.attr("content"); - logger.info("Found URL " + imgsrc); + LOGGER.info("Found URL " + imgsrc); break;//only one (useful) image possible for an "image page". } } //for debugging, or in case something goes wrong. if (imgsrc.isEmpty()) { - logger.warn("Image not found at " + this.url); + LOGGER.warn("Image not found at " + this.url); return; } @@ -167,7 +167,7 @@ public class ImagebamRipper extends AbstractHTMLRipper { addURLToDownload(new URL(imgsrc), prefix); } catch (IOException e) { - logger.error("[!] Exception while loading/parsing " + this.url, e); + LOGGER.error("[!]
Exception while loading/parsing " + this.url, e); } } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java index 83d4f098..1a658c59 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java @@ -43,7 +43,7 @@ public class ImagefapRipper extends AbstractHTMLRipper { newURL += "p"; } newURL += "gid=" + gid + "&view=2"; - logger.debug("Changed URL from " + url + " to " + newURL); + LOGGER.debug("Changed URL from " + url + " to " + newURL); return new URL(newURL); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java index f9175656..f50a84a0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java @@ -102,7 +102,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper { // Find image Elements images = doc.select("a > img"); if (images.isEmpty()) { - logger.warn("Image not found at " + this.url); + LOGGER.warn("Image not found at " + this.url); return; } Element image = images.first(); @@ -115,7 +115,7 @@ } addURLToDownload(new URL(imgsrc), prefix); } catch (IOException e) { - logger.error("[!] Exception while loading/parsing " + this.url, e); + LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java index 280060eb..d3f94456 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java @@ -108,24 +108,24 @@ public class ImgurRipper extends AlbumRipper { String title = null; final String defaultTitle1 = "Imgur: The most awesome images on the Internet"; final String defaultTitle2 = "Imgur: The magic of the Internet"; - logger.info("Trying to get album title"); + LOGGER.info("Trying to get album title"); elems = albumDoc.select("meta[property=og:title]"); if (elems != null) { title = elems.attr("content"); - logger.debug("Title is " + title); + LOGGER.debug("Title is " + title); } // This is here in case the album is unnamed, to prevent // Imgur: The most awesome images on the Internet from being added onto the album name if (title.contains(defaultTitle1) || title.contains(defaultTitle2)) { - logger.debug("Album is untitled or imgur is returning the default title"); + LOGGER.debug("Album is untitled or imgur is returning the default title"); // We set the title to "" here because if it's found in the next few attempts it will be changed // but if it's not found there will be no reason to set it later title = ""; - logger.debug("Trying to use title tag to get title"); + LOGGER.debug("Trying to use title tag to get title"); elems = albumDoc.select("title"); if (elems != null) { if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) { - logger.debug("Was unable to get album title or album was untitled"); + LOGGER.debug("Was unable to get album title or album was untitled"); } else { title = elems.text(); @@ -159,29 +159,29 @@ public class ImgurRipper extends AlbumRipper { case ALBUM: // Fall-through case USER_ALBUM: - logger.info("Album type is USER_ALBUM"); +
LOGGER.info("Album type is USER_ALBUM"); // Don't call getAlbumTitle(this.url) with this // as it seems to cause the album to be downloaded to a subdir. ripAlbum(this.url); break; case SERIES_OF_IMAGES: - logger.info("Album type is SERIES_OF_IMAGES"); + LOGGER.info("Album type is SERIES_OF_IMAGES"); ripAlbum(this.url); break; case SINGLE_IMAGE: - logger.info("Album type is SINGLE_IMAGE"); + LOGGER.info("Album type is SINGLE_IMAGE"); ripSingleImage(this.url); break; case USER: - logger.info("Album type is USER"); + LOGGER.info("Album type is USER"); ripUserAccount(url); break; case SUBREDDIT: - logger.info("Album type is SUBREDDIT"); + LOGGER.info("Album type is SUBREDDIT"); ripSubreddit(url); break; case USER_IMAGES: - logger.info("Album type is USER_IMAGES"); + LOGGER.info("Album type is USER_IMAGES"); ripUserImages(url); break; } @@ -241,7 +241,7 @@ public class ImgurRipper extends AlbumRipper { String[] imageIds = m.group(1).split(","); for (String imageId : imageIds) { // TODO: Fetch image with ID imageId - logger.debug("Fetching image info for ID " + imageId); + LOGGER.debug("Fetching image info for ID " + imageId); try { JSONObject json = Http.url("https://api.imgur.com/2/image/" + imageId + ".json").getJSON(); if (!json.has("image")) { @@ -259,7 +259,7 @@ public class ImgurRipper extends AlbumRipper { ImgurImage theImage = new ImgurImage(new URL(original)); album.addImage(theImage); } catch (Exception e) { - logger.error("Got exception while fetching imgur ID " + imageId, e); + LOGGER.error("Got exception while fetching imgur ID " + imageId, e); } } } @@ -271,7 +271,7 @@ public class ImgurRipper extends AlbumRipper { if (!strUrl.contains(",")) { strUrl += "/all"; } - logger.info(" Retrieving " + strUrl); + LOGGER.info(" Retrieving " + strUrl); Document doc = getDocument(strUrl); // Try to use embedded JSON to retrieve images Matcher m = getEmbeddedJsonMatcher(doc); @@ -283,7 +283,7 @@ public class ImgurRipper extends AlbumRipper { .getJSONArray("images"); return createImgurAlbumFromJsonArray(url, jsonImages); } catch (JSONException e) { - logger.debug("Error while parsing JSON at " + url + ", continuing", e); + LOGGER.debug("Error while parsing JSON at " + url + ", continuing", e); } } @@ -291,10 +291,10 @@ public class ImgurRipper extends AlbumRipper { // http://i.rarchives.com/search.cgi?cache=http://imgur.com/a/albumID // At the least, get the thumbnails. - logger.info("[!] Falling back to /noscript method"); + LOGGER.info("[!] Falling back to /noscript method"); String newUrl = url.toExternalForm() + "/noscript"; - logger.info(" Retrieving " + newUrl); + LOGGER.info(" Retrieving " + newUrl); doc = Jsoup.connect(newUrl) .userAgent(USER_AGENT) .get(); @@ -311,7 +311,7 @@ public class ImgurRipper extends AlbumRipper { image = "http:" + thumb.select("img").attr("src"); } else { // Unable to find image in this div - logger.error("[!] Unable to find image in div: " + thumb.toString()); + LOGGER.error("[!] 
Unable to find image in div: " + thumb.toString()); continue; } if (image.endsWith(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) { @@ -368,7 +368,7 @@ public class ImgurRipper extends AlbumRipper { * @throws IOException */ private void ripUserAccount(URL url) throws IOException { - logger.info("Retrieving " + url); + LOGGER.info("Retrieving " + url); sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); Document doc = Http.url(url).get(); for (Element album : doc.select("div.cover a")) { @@ -383,7 +383,7 @@ public class ImgurRipper extends AlbumRipper { ripAlbum(albumURL, albumID); Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000); } catch (Exception e) { - logger.error("Error while ripping album: " + e.getMessage(), e); + LOGGER.error("Error while ripping album: " + e.getMessage(), e); } } } @@ -420,7 +420,7 @@ public class ImgurRipper extends AlbumRipper { } Thread.sleep(1000); } catch (Exception e) { - logger.error("Error while ripping user images: " + e.getMessage(), e); + LOGGER.error("Error while ripping user images: " + e.getMessage(), e); break; } } @@ -435,7 +435,7 @@ public class ImgurRipper extends AlbumRipper { pageURL += "/"; } pageURL += "page/" + page + "/miss?scrolled"; - logger.info(" Retrieving " + pageURL); + LOGGER.info(" Retrieving " + pageURL); Document doc = Http.url(pageURL).get(); Elements imgs = doc.select(".post img"); for (Element img : imgs) { @@ -456,7 +456,7 @@ public class ImgurRipper extends AlbumRipper { try { Thread.sleep(1000); } catch (InterruptedException e) { - logger.error("Interrupted while waiting to load next album: ", e); + LOGGER.error("Interrupted while waiting to load next album: ", e); break; } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java index 94331650..63a07e91 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java @@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.AbstractJSONRipper; import com.rarchives.ripme.utils.Http; import org.jsoup.Connection; -import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import com.rarchives.ripme.ui.RipStatusMessage; @@ -67,7 +66,7 @@ public class InstagramRipper extends AbstractJSONRipper { @Override public URL sanitizeURL(URL url) throws MalformedURLException { URL san_url = new URL(url.toExternalForm().replaceAll("\\?hl=\\S*", "")); - logger.info("sanitized URL is " + san_url.toExternalForm()); + LOGGER.info("sanitized URL is " + san_url.toExternalForm()); return san_url; } @@ -184,7 +183,7 @@ public class InstagramRipper extends AbstractJSONRipper { @Override public JSONObject getFirstPage() throws IOException { Connection.Response resp = Http.url(url).response(); - logger.info(resp.cookies()); + LOGGER.info(resp.cookies()); csrftoken = resp.cookie("csrftoken"); Document p = resp.parse(); // Get the query hash so we can download the next page @@ -197,7 +196,7 @@ public class InstagramRipper extends AbstractJSONRipper { Document doc = Http.url("https://www.instagram.com/p/" + videoID).get(); return doc.select("meta[property=og:video]").attr("content"); } catch (IOException e) { - logger.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID); + LOGGER.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID); } return ""; } @@ -279,9 +278,9 @@ public class InstagramRipper extends AbstractJSONRipper { addURLToDownload(new 
URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode")); } } catch (MalformedURLException e) { - logger.error("Unable to download slide show, URL was malformed"); + LOGGER.error("Unable to download slide show, URL was malformed"); } catch (IOException e) { - logger.error("Unable to download slide show"); + LOGGER.error("Unable to download slide show"); } } } @@ -312,7 +311,7 @@ } } else { // We're ripping from a single page - logger.info("Ripping from single page"); + LOGGER.info("Ripping from single page"); imageURLs = getPostsFromSinglePage(json); } @@ -321,7 +320,7 @@ private String getIGGis(String variables) { String stringToMD5 = rhx_gis + ":" + variables; - logger.debug("String to md5 is \"" + stringToMD5 + "\""); + LOGGER.debug("String to md5 is \"" + stringToMD5 + "\""); try { byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8"); @@ -355,7 +354,7 @@ toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis); // Sleep for a while to avoid a ban - logger.info(toreturn); + LOGGER.info(toreturn); if (!pageHasImages(toreturn)) { throw new IOException("No more pages"); } @@ -371,7 +370,7 @@ sleep(2500); String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}"; String ig_gis = getIGGis(vars); - logger.info(ig_gis); + LOGGER.info(ig_gis); toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis); if (!pageHasImages(toreturn)) { @@ -419,11 +418,11 @@ return new JSONObject(sb.toString()); } catch (MalformedURLException e) { - logger.info("Unable to get query_hash, " + url + " is a malformed URL"); + LOGGER.info("Unable to get query_hash, " + url + " is a malformed URL"); return null; } catch (IOException e) { - logger.info("Unable to get query_hash"); - logger.info(e.getMessage()); + LOGGER.info("Unable to get query_hash"); + LOGGER.info(e.getMessage()); return null; } } @@ -444,11 +443,11 @@ in.close(); } catch (MalformedURLException e) { - logger.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL"); + LOGGER.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL"); return null; } catch (IOException e) { - logger.info("Unable to get query_hash"); - logger.info(e.getMessage()); + LOGGER.info("Unable to get query_hash"); + LOGGER.info(e.getMessage()); return null; } if (!rippingTag) { @@ -467,6 +466,11 @@ if (m.find()) { return m.group(1); } + jsP = Pattern.compile("o.pagination},queryId:.([a-zA-Z0-9]+)."); + m = jsP.matcher(sb.toString()); + if (m.find()) { + return m.group(1); + } } else { Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+)."); @@ -475,7 +479,7 @@ return m.group(1); } } - logger.error("Could not find query_hash on " + jsFileURL); + LOGGER.error("Could not find query_hash on " + jsFileURL); return null; }
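The getIGGis hunk above is the heart of Instagram's request signing: the profile page embeds an rhx_gis value, and every paged GraphQL request must carry an MD5 digest of that value joined to the request's variables with a colon. A minimal sketch of the digest step, assuming the usual lower-case hex encoding (computeIgGis and its parameter names are illustrative, not part of the patch):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Sketch of the value getIGGis produces: md5(rhx_gis + ":" + variables),
    // hex-encoded. The patch only shows the string being hashed; the encoding
    // detail here is an assumption.
    static String computeIgGis(String rhxGis, String variables) throws NoSuchAlgorithmException {
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest((rhxGis + ":" + variables).getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b & 0xff)); // two hex digits per byte
        }
        return hex.toString();
    }

The resulting string is the ig_gis argument the ripper hands to getPage() alongside the query_hash URL.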
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java index f5782dab..c7f7df71 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java @@ -54,16 +54,16 @@ public class JagodibujaRipper extends AbstractHTMLRipper { sleep(500); Document comicPage = Http.url(comicPageUrl.attr("href")).get(); Element elem = comicPage.select("span.full-size-link > a").first(); - logger.info("Got link " + elem.attr("href")); + LOGGER.info("Got link " + elem.attr("href")); try { addURLToDownload(new URL(elem.attr("href")), ""); } catch (MalformedURLException e) { - logger.warn("Malformed URL"); + LOGGER.warn("Malformed URL"); e.printStackTrace(); } result.add(elem.attr("href")); } catch (IOException e) { - logger.info("Error loading " + comicPageUrl); + LOGGER.info("Error loading " + comicPageUrl); } } return result; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java index 376d1292..68197721 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java @@ -36,7 +36,7 @@ public class LusciousRipper extends AbstractHTMLRipper { // "url" is an instance field of the superclass Document page = Http.url(url).get(); URL firstUrl = new URL("https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href")); - logger.info("First page is " + "https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href")); + LOGGER.info("First page is " + "https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href")); return Http.url(firstUrl).get(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java index 8f8f8e68..cabb4188 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java @@ -15,8 +15,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; -import javax.print.Doc; - public class ManganeloRipper extends AbstractHTMLRipper { public ManganeloRipper(URL url) throws IOException { @@ -67,7 +65,7 @@ } private List<String> getURLsFromChap(String url) { - logger.debug("Getting urls from " + url); + LOGGER.debug("Getting urls from " + url); List<String> result = new ArrayList<>(); try { Document doc = Http.url(url).get(); @@ -82,7 +80,7 @@ } private List<String> getURLsFromChap(Document doc) { - logger.debug("Getting urls from " + url); + LOGGER.debug("Getting urls from " + url); List<String> result = new ArrayList<>(); for (Element el : doc.select("img.img_content")) { result.add(el.attr("src")); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java index d8fb3655..b2fee8e5 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java @@ -75,7 +75,7 @@ if (isStopped()) { break; } - logger.info("Retrieving " + nextURL); + LOGGER.info("Retrieving " + nextURL); sendUpdate(STATUS.LOADING_RESOURCE, nextURL); Document
doc = Http.url(nextURL) .referrer("http://motherless.com") @@ -152,10 +152,10 @@ public class MotherlessRipper extends AlbumRipper { } addURLToDownload(new URL(file), prefix); } else { - logger.warn("[!] could not find '__fileurl' at " + url); + LOGGER.warn("[!] could not find '__fileurl' at " + url); } } catch (IOException e) { - logger.error("[!] Exception while loading/parsing " + this.url, e); + LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java index f455e58b..952b434e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java @@ -109,7 +109,7 @@ public class NatalieMuRipper extends AbstractHTMLRipper { imgUrl = imgUrl.replace("list_thumb_inbox","xlarge"); // Don't download the same URL twice if (imageURLs.contains(imgUrl)) { - logger.debug("Already attempted: " + imgUrl); + LOGGER.debug("Already attempted: " + imgUrl); continue; } imageURLs.add(imgUrl); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java index 1c7cf8dc..6454c508 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NewsfilterRipper.java @@ -43,7 +43,7 @@ public class NewsfilterRipper extends AlbumRipper { public void rip() throws IOException { String gid = getGID(this.url); String theurl = "http://newsfilter.org/gallery/" + gid; - logger.info("Loading " + theurl); + LOGGER.info("Loading " + theurl); Connection.Response resp = Jsoup.connect(theurl) .timeout(5000) diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java index 098f1e45..3585b6bb 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java @@ -86,7 +86,7 @@ public class NfsfwRipper extends AlbumRipper { String nextURL = nextAlbum.first; String nextSubalbum = nextAlbum.second; sendUpdate(STATUS.LOADING_RESOURCE, nextURL); - logger.info(" Retrieving " + nextURL); + LOGGER.info(" Retrieving " + nextURL); if (albumDoc == null) { albumDoc = Http.url(nextURL).get(); } @@ -116,7 +116,7 @@ public class NfsfwRipper extends AlbumRipper { break; } } catch (MalformedURLException mue) { - logger.warn("Invalid URL: " + imagePage); + LOGGER.warn("Invalid URL: " + imagePage); } } if (isThisATest()) { @@ -133,7 +133,7 @@ public class NfsfwRipper extends AlbumRipper { try { Thread.sleep(1000); } catch (InterruptedException e) { - logger.error("Interrupted while waiting to load next page", e); + LOGGER.error("Interrupted while waiting to load next page", e); throw new IOException(e); } } @@ -168,7 +168,7 @@ public class NfsfwRipper extends AlbumRipper { .get(); Elements images = doc.select(".gbBlock img"); if (images.isEmpty()) { - logger.error("Failed to find image at " + this.url); + LOGGER.error("Failed to find image at " + this.url); return; } String file = images.first().attr("src"); @@ -181,7 +181,7 @@ public class NfsfwRipper extends AlbumRipper { } addURLToDownload(new URL(file), prefix, this.subdir); } catch (IOException e) { - logger.error("[!] Exception while loading/parsing " + this.url, e); + LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java index 7752f18c..daef205e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java @@ -9,7 +9,6 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; @@ -102,7 +101,7 @@ public class NhentaiRipper extends AbstractHTMLRipper { if (blackListedTags == null) { return null; } - logger.info("Blacklisted tags " + blackListedTags[0]); + LOGGER.info("Blacklisted tags " + blackListedTags[0]); List tagsOnPage = getTags(doc); for (String tag : blackListedTags) { for (String pageTag : tagsOnPage) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java index f5cf5cf7..ad0159b3 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java @@ -35,7 +35,7 @@ public class PhotobucketRipper extends AlbumRipper { } public URL sanitizeURL(URL url) throws MalformedURLException { - logger.info(url); + LOGGER.info(url); String u = url.toExternalForm(); if (u.contains("?")) { u = u.substring(0, u.indexOf("?")); @@ -100,12 +100,12 @@ public class PhotobucketRipper extends AlbumRipper { } String nextSub = subsToRip.remove(0); rippedSubs.add(nextSub); - logger.info("Attempting to rip next subalbum: " + nextSub); + LOGGER.info("Attempting to rip next subalbum: " + nextSub); try { pageResponse = null; subalbums = ripAlbumAndGetSubalbums(nextSub); } catch (IOException e) { - logger.error("Error while ripping " + nextSub, e); + LOGGER.error("Error while ripping " + nextSub, e); break; } for (String subalbum : subalbums) { @@ -131,7 +131,7 @@ public class PhotobucketRipper extends AlbumRipper { pageIndex++; if (pageIndex > 1 || pageResponse == null) { url = theUrl + String.format("?sort=3&page=%d", pageIndex); - logger.info(" Retrieving " + url); + LOGGER.info(" Retrieving " + url); pageResponse = Http.url(url).response(); } Document albumDoc = pageResponse.parse(); @@ -153,7 +153,7 @@ public class PhotobucketRipper extends AlbumRipper { } } if (jsonString == null) { - logger.error("Unable to find JSON data at URL: " + url); + LOGGER.error("Unable to find JSON data at URL: " + url); break; } JSONObject json = new JSONObject(jsonString); @@ -189,7 +189,7 @@ public class PhotobucketRipper extends AlbumRipper { + "&albumPath=" + currentAlbumPath // %2Falbums%2Fab10%2FSpazzySpizzy" + "&json=1"; try { - logger.info("Loading " + apiUrl); + LOGGER.info("Loading " + apiUrl); JSONObject json = Http.url(apiUrl).getJSON(); JSONArray subalbums = json.getJSONObject("body").getJSONArray("subAlbums"); for (int i = 0; i < subalbums.length(); i++) { @@ -202,7 +202,7 @@ public class PhotobucketRipper extends AlbumRipper { result.add(suburl); } } catch (IOException e) { - logger.error("Failed to get subalbums from " + apiUrl, e); + LOGGER.error("Failed to get subalbums from " + apiUrl, e); } return result; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java index 8b3b8d7d..bffd0f2d 
100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java @@ -46,7 +46,7 @@ public class PornhubRipper extends AlbumRipper { try { // Attempt to use album title as GID if (albumDoc == null) { - logger.info(" Retrieving " + url.toExternalForm()); + LOGGER.info(" Retrieving " + url.toExternalForm()); sendUpdate(STATUS.LOADING_RESOURCE, url.toString()); albumDoc = Http.url(url).get(); } @@ -54,7 +54,7 @@ public class PornhubRipper extends AlbumRipper { return HOST + "_" + elems.get(0).text(); } catch (Exception e) { // Fall back to default album naming convention - logger.warn("Failed to get album title from " + url, e); + LOGGER.warn("Failed to get album title from " + url, e); } return super.getAlbumTitle(url); } @@ -82,7 +82,7 @@ public class PornhubRipper extends AlbumRipper { String nextUrl = this.url.toExternalForm(); if (albumDoc == null) { - logger.info(" Retrieving album page " + nextUrl); + LOGGER.info(" Retrieving album page " + nextUrl); sendUpdate(STATUS.LOADING_RESOURCE, nextUrl); albumDoc = Http.url(nextUrl) .referrer(this.url) @@ -92,8 +92,8 @@ public class PornhubRipper extends AlbumRipper { // Find thumbnails Elements thumbs = albumDoc.select(".photoBlockBox li"); if (thumbs.isEmpty()) { - logger.debug("albumDoc: " + albumDoc); - logger.debug("No images found at " + nextUrl); + LOGGER.debug("albumDoc: " + albumDoc); + LOGGER.debug("No images found at " + nextUrl); return; } @@ -113,7 +113,7 @@ public class PornhubRipper extends AlbumRipper { try { Thread.sleep(IMAGE_SLEEP_TIME); } catch (InterruptedException e) { - logger.warn("Interrupted while waiting to load next image", e); + LOGGER.warn("Interrupted while waiting to load next image", e); } } @@ -155,7 +155,7 @@ public class PornhubRipper extends AlbumRipper { Elements images = doc.select("#photoImageSection img"); Element image = images.first(); String imgsrc = image.attr("src"); - logger.info("Found URL " + imgsrc + " via " + images.get(0)); + LOGGER.info("Found URL " + imgsrc + " via " + images.get(0)); // Provide prefix and let the AbstractRipper "guess" the filename String prefix = ""; @@ -167,7 +167,7 @@ public class PornhubRipper extends AlbumRipper { addURLToDownload(imgurl, prefix); } catch (IOException e) { - logger.error("[!] Exception while loading/parsing " + this.url, e); + LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java index e8798476..bb60d616 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java @@ -4,13 +4,10 @@ import java.io.File; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; -import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.rarchives.ripme.ripper.AbstractRipper; -import com.rarchives.ripme.ripper.rippers.video.GfycatRipper; import org.json.JSONArray; import org.json.JSONObject; import org.json.JSONTokener; @@ -20,9 +17,6 @@ import com.rarchives.ripme.ui.UpdateUtils; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.RipUtils; import com.rarchives.ripme.utils.Utils; -import org.jsoup.Jsoup; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; public class RedditRipper extends AlbumRipper { @@ -110,7 +104,7 @@ public class RedditRipper extends AlbumRipper { try { Thread.sleep(2000); } catch (InterruptedException e) { - logger.warn("Interrupted while sleeping", e); + LOGGER.warn("Interrupted while sleeping", e); } return nextURL; } @@ -122,7 +116,7 @@ public class RedditRipper extends AlbumRipper { try { Thread.sleep(timeDiff); } catch (InterruptedException e) { - logger.warn("[!] Interrupted while waiting to load next page", e); + LOGGER.warn("[!] Interrupted while waiting to load next page", e); return new JSONArray(); } } @@ -141,7 +135,7 @@ public class RedditRipper extends AlbumRipper { } else if (jsonObj instanceof JSONArray) { jsonArray = (JSONArray) jsonObj; } else { - logger.warn("[!] Unable to parse JSON: " + jsonString); + LOGGER.warn("[!] 
Unable to parse JSON: " + jsonString); } return jsonArray; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java index d83d5930..6d14dc30 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SankakuComplexRipper.java @@ -89,10 +89,10 @@ public class SankakuComplexRipper extends AbstractHTMLRipper { String siteURL = "https://" + subDomain + "sankakucomplex.com"; // Get the page the full sized image is on Document subPage = Http.url(siteURL + postLink).get(); - logger.info("Checking page " + siteURL + postLink); + LOGGER.info("Checking page " + siteURL + postLink); imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href")); } catch (IOException e) { - logger.warn("Error while loading page " + postLink, e); + LOGGER.warn("Error while loading page " + postLink, e); } } return imageURLs; @@ -112,7 +112,7 @@ // Only logged in users can see past page 25 // Trying to rip page 26 will throw a no images found error if (!nextPage.contains("page=26")) { - logger.info("Getting next page: " + pagination.attr("abs:next-page-url")); + LOGGER.info("Getting next page: " + pagination.attr("abs:next-page-url")); return Http.url(pagination.attr("abs:next-page-url")).cookies(cookies).get(); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java index 9de3d2ae..d6a0f9cb 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java @@ -50,7 +50,7 @@ public class SinfestRipper extends AbstractHTMLRipper { @Override public Document getNextPage(Document doc) throws IOException { Element elem = doc.select("td.style5 > a > img").last(); - logger.info(elem.parent().attr("href")); if (elem == null || elem.parent().attr("href").equals("view.php?date=")) { throw new IOException("No more pages"); } + LOGGER.info(elem.parent().attr("href")); // safe: elem was null-checked above diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java index 4cfaf485..b331bbce 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java @@ -65,9 +65,9 @@ public class StaRipper extends AbstractHTMLRipper { cookies.putAll(resp.cookies()); thumbPage = resp.parse(); } catch (MalformedURLException e) { - logger.info(thumbPageURL + " is a malformed URL"); + LOGGER.info(thumbPageURL + " is a malformed URL"); } catch (IOException e) { - logger.info(e.getMessage()); + LOGGER.info(e.getMessage()); } String imageDownloadUrl = thumbPage.select("a.dev-page-download").attr("href"); if (imageDownloadUrl != null && !imageDownloadUrl.equals("")) { @@ -97,10 +97,10 @@ .followRedirects(false) .execute(); String imageURL = response.header("Location"); - logger.info(imageURL); + LOGGER.info(imageURL); return imageURL; } catch (IOException e) { - logger.info("Got error message " + e.getMessage() + " trying to download " + url); + LOGGER.info("Got error message " + e.getMessage() + " trying to download " + url); return null; } }
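The StaRipper hunk just above also illustrates how the ripper recovers the full-size file behind a download button: it executes the request with redirects disabled and reads the target straight out of the Location header of the redirect response. A self-contained sketch of that pattern with Jsoup (cookie handling from the original omitted; resolveDownloadUrl is an illustrative name):

    import java.io.IOException;
    import org.jsoup.Connection;
    import org.jsoup.Jsoup;

    // With redirects disabled, the server's redirect response exposes the
    // final file URL in its "Location" header instead of being followed.
    static String resolveDownloadUrl(String downloadPageUrl) {
        try {
            Connection.Response response = Jsoup.connect(downloadPageUrl)
                    .ignoreContentType(true)
                    .followRedirects(false)
                    .execute();
            return response.header("Location"); // null if the server did not redirect
        } catch (IOException e) {
            return null;
        }
    }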
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java index 8ef019a2..369ce741 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java @@ -56,7 +56,7 @@ public class TapasticRipper extends AbstractHTMLRipper { List<String> urls = new ArrayList<>(); String html = page.data(); if (!html.contains("episodeList : ")) { - logger.error("No 'episodeList' found at " + this.url); + LOGGER.error("No 'episodeList' found at " + this.url); return urls; } String jsonString = Utils.between(html, "episodeList : ", ",\n").get(0); @@ -93,7 +93,7 @@ } } } catch (IOException e) { - logger.error("[!] Exception while downloading " + url, e); + LOGGER.error("[!] Exception while downloading " + url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java index 630a0d0f..d25ef345 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java @@ -69,7 +69,7 @@ public class TeenplanetRipper extends AlbumRipper { @Override public void rip() throws IOException { int index = 0; - logger.info("Retrieving " + this.url); + LOGGER.info("Retrieving " + this.url); sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm()); if (albumDoc == null) { albumDoc = Http.url(url).get(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java index 70c023d3..4886503a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java @@ -40,7 +40,7 @@ public class TsuminoRipper extends AbstractHTMLRipper { JSONObject json = new JSONObject(jsonInfo); return json.getJSONArray("reader_page_urls"); } catch (IOException e) { - logger.info(e); + LOGGER.info(e); sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED, "Unable to download album, please complete the captcha at http://www.tsumino.com/Read/Auth/" + getAlbumID() + " and try again"); return null; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java index 41da5d13..b7a437b2 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java @@ -51,11 +51,11 @@ public class TumblrRipper extends AlbumRipper { } if (useDefaultApiKey || Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX").equals("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX")) { - logger.info("Using api key: " + API_KEY); + LOGGER.info("Using api key: " + API_KEY); return API_KEY; } else { String userDefinedAPIKey = Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX"); - logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey); + LOGGER.info("Using user tumblr.auth api key: " + userDefinedAPIKey); return userDefinedAPIKey; } @@ -66,7 +66,7 @@ "FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4", "qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz"); int genNum = new
Random().nextInt(APIKEYS.size()); - logger.info(genNum); + LOGGER.info(genNum); final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS return API_KEY; } @@ -96,10 +96,10 @@ public class TumblrRipper extends AlbumRipper { if (StringUtils.countMatches(u, ".") > 2) { url = new URL(u.replace(".tumblr.com", "")); if (isTumblrURL(url)) { - logger.info("Detected tumblr site: " + url); + LOGGER.info("Detected tumblr site: " + url); } else { - logger.info("Not a tumblr site: " + url); + LOGGER.info("Not a tumblr site: " + url); } } return url; @@ -115,7 +115,7 @@ public class TumblrRipper extends AlbumRipper { int status = json.getJSONObject("meta").getInt("status"); return status == 200; } catch (IOException e) { - logger.error("Error while checking possible tumblr domain: " + url.getHost(), e); + LOGGER.error("Error while checking possible tumblr domain: " + url.getHost(), e); } return false; } @@ -150,7 +150,7 @@ public class TumblrRipper extends AlbumRipper { String apiURL = getTumblrApiURL(mediaType, offset); - logger.info("Retrieving " + apiURL); + LOGGER.info("Retrieving " + apiURL); sendUpdate(STATUS.LOADING_RESOURCE, apiURL); JSONObject json = null; @@ -165,7 +165,7 @@ public class TumblrRipper extends AlbumRipper { if (status.getStatusCode() == HttpURLConnection.HTTP_UNAUTHORIZED && !useDefaultApiKey) { retry = true; } else if (status.getStatusCode() == 429) { - logger.error("Tumblr rate limit has been exceeded"); + LOGGER.error("Tumblr rate limit has been exceeded"); sendUpdate(STATUS.DOWNLOAD_ERRORED,"Tumblr rate limit has been exceeded"); exceededRateLimit = true; break; @@ -178,7 +178,7 @@ public class TumblrRipper extends AlbumRipper { String apiKey = getApiKey(); String message = "401 Unauthorized. Will retry with default Tumblr API key: " + apiKey; - logger.info(message); + LOGGER.info(message); sendUpdate(STATUS.DOWNLOAD_WARN, message); Utils.setConfigString(TUMBLR_AUTH_CONFIG_KEY, apiKey); // save the default key to the config @@ -186,7 +186,7 @@ public class TumblrRipper extends AlbumRipper { // retry loading the JSON apiURL = getTumblrApiURL(mediaType, offset); - logger.info("Retrieving " + apiURL); + LOGGER.info("Retrieving " + apiURL); sendUpdate(STATUS.LOADING_RESOURCE, apiURL); json = Http.url(apiURL).getJSON(); @@ -195,7 +195,7 @@ public class TumblrRipper extends AlbumRipper { try { Thread.sleep(1000); } catch (InterruptedException e) { - logger.error("[!] Interrupted while waiting to load next album:", e); + LOGGER.error("[!] Interrupted while waiting to load next album:", e); break; } @@ -224,7 +224,7 @@ public class TumblrRipper extends AlbumRipper { posts = json.getJSONObject("response").getJSONArray("posts"); if (posts.length() == 0) { - logger.info(" Zero posts returned."); + LOGGER.info(" Zero posts returned."); return false; } @@ -251,7 +251,7 @@ public class TumblrRipper extends AlbumRipper { addURLToDownload(redirectedURL); } } catch (Exception e) { - logger.error("[!] Error while parsing photo in " + photo, e); + LOGGER.error("[!] Error while parsing photo in " + photo, e); } } } else if (post.has("video_url")) { @@ -259,7 +259,7 @@ public class TumblrRipper extends AlbumRipper { fileURL = new URL(post.getString("video_url").replaceAll("http:", "https:")); addURLToDownload(fileURL); } catch (Exception e) { - logger.error("[!] Error while parsing video in " + post, e); + LOGGER.error("[!] 
Error while parsing video in " + post, e); return true; } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java index 60c7a6bb..cd5cf582 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java @@ -103,13 +103,13 @@ .getJSONObject(resource) .getJSONObject(api); int remaining = stats.getInt("remaining"); - logger.info(" Twitter " + resource + " calls remaining: " + remaining); + LOGGER.info(" Twitter " + resource + " calls remaining: " + remaining); if (remaining < 20) { - logger.error("Twitter API calls exhausted: " + stats.toString()); + LOGGER.error("Twitter API calls exhausted: " + stats.toString()); throw new IOException("Less than 20 API calls remaining; not enough to rip."); } } catch (JSONException e) { - logger.error("JSONException: ", e); + LOGGER.error("JSONException: ", e); throw new IOException("Error while parsing JSON: " + body, e); } } @@ -142,7 +142,7 @@ private List<JSONObject> getTweets(String url) throws IOException { List<JSONObject> tweets = new ArrayList<>(); - logger.info(" Retrieving " + url); + LOGGER.info(" Retrieving " + url); Document doc = Http.url(url) .ignoreContentType() .header("Authorization", "Bearer " + accessToken) @@ -171,7 +171,7 @@ private int parseTweet(JSONObject tweet) throws MalformedURLException { int parsedCount = 0; if (!tweet.has("extended_entities")) { - logger.error("XXX Tweet doesn't have entitites"); + LOGGER.error("XXX Tweet doesn't have entities"); return 0; } @@ -201,7 +201,7 @@ addURLToDownload(new URL(url)); parsedCount++; } else { - logger.debug("Unexpected media_url: " + url); + LOGGER.debug("Unexpected media_url: " + url); } } } @@ -229,14 +229,14 @@ for (int i = 0; i < MAX_REQUESTS; i++) { List<JSONObject> tweets = getTweets(getApiURL(lastMaxID - 1)); if (tweets.isEmpty()) { - logger.info(" No more tweets found."); + LOGGER.info(" No more tweets found."); break; } - logger.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets); + LOGGER.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets); if (tweets.size() == 1 && lastMaxID.equals(tweets.get(0).getString("id_str")) ) { - logger.info(" No more tweet found."); + LOGGER.info(" No more tweets found."); break; } @@ -256,7 +256,7 @@ try { Thread.sleep(WAIT_TIME); } catch (InterruptedException e) { - logger.error("[!] Interrupted while waiting to load more results", e); + LOGGER.error("[!]
Interrupted while waiting to load more results", e); break; } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java index d7cd8e10..b3f48505 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TwodgalleriesRipper.java @@ -66,7 +66,7 @@ public class TwodgalleriesRipper extends AbstractHTMLRipper { try { login(); } catch (IOException e) { - logger.error("Failed to login", e); + LOGGER.error("Failed to login", e); } String url = getURL(getGID(this.url), offset); return Http.url(url) diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java index abdb0320..d2ea95fc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java @@ -43,7 +43,7 @@ public class ViewcomicRipper extends AbstractHTMLRipper { return getHost() + "_" + title.trim(); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java index caf24916..b2472cc9 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java @@ -71,7 +71,7 @@ public class VkRipper extends AlbumRipper { String[] jsonStrings = doc.toString().split(""); JSONObject json = new JSONObject(jsonStrings[jsonStrings.length - 1]); JSONArray videos = json.getJSONArray("all"); - logger.info("Found " + videos.length() + " videos"); + LOGGER.info("Found " + videos.length() + " videos"); for (int i = 0; i < videos.length(); i++) { JSONArray jsonVideo = videos.getJSONArray(i); int vidid = jsonVideo.getInt(1); @@ -85,7 +85,7 @@ try { Thread.sleep(500); } catch (InterruptedException e) { - logger.error("Interrupted while waiting to fetch next video URL", e); + LOGGER.error("Interrupted while waiting to fetch next video URL", e); break; } } @@ -96,7 +96,7 @@ Map<String, String> photoIDsToURLs = new HashMap<>(); int offset = 0; while (true) { - logger.info(" Retrieving " + this.url); + LOGGER.info(" Retrieving " + this.url); // al=1&offset=80&part=1 Map<String, String> postData = new HashMap<>(); @@ -119,7 +119,7 @@ Set<String> photoIDsToGet = new HashSet<>(); for (Element a : elements) { if (!a.attr("onclick").contains("showPhoto('")) { - logger.error("a: " + a); + LOGGER.error("a: " + a); continue; } String photoID = a.attr("onclick"); @@ -134,12 +134,12 @@ try { photoIDsToURLs.putAll(getPhotoIDsToURLs(photoID)); } catch (IOException e) { - logger.error("Exception while retrieving photo id " + photoID, e); + LOGGER.error("Exception while retrieving photo id " + photoID, e); continue; } } if (!photoIDsToURLs.containsKey(photoID)) { - logger.error("Could not find URL for photo ID: " + photoID); + LOGGER.error("Could not find URL for photo ID: " + photoID); continue; } String url = photoIDsToURLs.get(photoID);
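The VscoRipper rewrite below replaces the old single-image-only path with calls to VSCO's JSON API: it fetches an anonymous token from /content/Static/userinfo, resolves the profile's numeric site id via /2.0/sites, then pages through /2.0/medias 1,000 items at a time until the reported total is exhausted. A condensed sketch of that paging loop (collectMediaUrls and fetchPage are illustrative stand-ins for the patch's getProfileJSON plumbing):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.IntFunction;
    import org.json.JSONArray;
    import org.json.JSONObject;

    // Each page of /2.0/medias returns up to 1,000 items plus a "total" count;
    // fetchPage.apply(n) stands in for getProfileJSON(tkn, username, n, siteId).
    static List<String> collectMediaUrls(IntFunction<JSONObject> fetchPage) {
        List<String> urls = new ArrayList<>();
        int page = 1;
        while (true) {
            JSONObject batch = fetchPage.apply(page);
            JSONArray media = batch.getJSONArray("media");
            for (int i = 0; i < media.length(); i++) {
                // "responsive_url" comes back scheme-less, hence the prefix
                urls.add("https://" + media.getJSONObject(i).getString("responsive_url"));
            }
            if (page * 1000 > batch.getInt("total")) { // past the last page
                return urls;
            }
            page++;
        }
    }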
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java index 00eed1f4..11cb6a73 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java @@ -6,10 +6,11 @@ import com.rarchives.ripme.utils.Http; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; -import java.util.ArrayList; -import java.util.List; +import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; + +import org.json.JSONObject; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; @@ -19,7 +20,11 @@ import org.jsoup.select.Elements; /** * For ripping VSCO pictures. */ -public class VscoRipper extends AbstractHTMLRipper{ +public class VscoRipper extends AbstractHTMLRipper { + + int pageNumber = 1; + JSONObject profileJSON; + private static final String DOMAIN = "vsco.co", HOST = "vsco"; @@ -73,40 +78,83 @@ try { toRip.add(vscoImageToURL(url.toExternalForm())); } catch (IOException ex) { - logger.debug("Failed to convert " + url.toString() + " to external form."); + LOGGER.debug("Failed to convert " + url.toString() + " to external form."); } - } else {//want to rip a member profile - /* - String baseURL = "https://vsco.co"; - - - //Find all the relative links, adds Base URL, then adds them to an ArrayList - List<String> relativeLinks = new ArrayList<>(); - Elements links = page.getElementsByTag("a"); - - - for(Element link : links){ - System.out.println(link.toString()); - //if link includes "/media/", add it to the list - if (link.attr("href").contains("/media")) { - try { - String relativeURL = vscoImageToURL(link.attr("href")); - toRip.add(baseURL + relativeURL); - } catch (IOException ex) { - logger.debug("Could not add \"" + link.toString() + "\" to list for ripping."); - } + } else { + String username = getUserName(); + String userTkn = getUserTkn(username); + String siteID = getSiteID(userTkn, username); + while (true) { + profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID); + for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) { + toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url")); } + if (pageNumber * 1000 > profileJSON.getInt("total")) { + return toRip; + } + pageNumber++; } - */ - logger.debug("Sorry, RipMe currently only supports ripping single images."); - - + + } return toRip; } + private String getUserTkn(String username) { + String userinfoPage = "https://vsco.co/content/Static/userinfo"; + String referer = "https://vsco.co/" + username + "/images/1"; + Map<String, String> cookies = new HashMap<>(); + cookies.put("vs_anonymous_id", UUID.randomUUID().toString()); + try { + Element doc = Http.url(userinfoPage).cookies(cookies).referrer(referer).ignoreContentType().get().body(); + String json = doc.text().replaceAll("define\\(", ""); + json = json.replaceAll("\\)", ""); + return new JSONObject(json).getString("tkn"); + } catch (IOException e) { + LOGGER.error("Could not get user tkn"); + return null; + } + } + + private String getUserName() { + Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9]+)/images/[0-9]+"); + Matcher m = p.matcher(url.toExternalForm()); + + if (m.matches()) { + String user = m.group(1); + return user; + } + return null; + } + + private JSONObject getProfileJSON(String tkn, String username, String page, String siteId) { + String size = "1000"; + String purl = "https://vsco.co/ajxp/" + tkn +
"/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size; + Map cookies = new HashMap<>(); + cookies.put("vs", tkn); + try { + JSONObject j = Http.url(purl).cookies(cookies).getJSON(); + return j; + } catch (IOException e) { + LOGGER.error("Could not profile images"); + return null; + } + } + + private String getSiteID(String tkn, String username) { + Map cookies = new HashMap<>(); + cookies.put("vs", tkn); + try { + JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON(); + return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id")); + } catch (IOException e) { + LOGGER.error("Could not get site id"); + return null; + } + } + private String vscoImageToURL(String url) throws IOException{ Document page = Jsoup.connect(url).userAgent(USER_AGENT) .get(); @@ -121,14 +169,14 @@ public class VscoRipper extends AbstractHTMLRipper{ givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number) result = givenURL; - logger.debug("Found image URL: " + givenURL); + LOGGER.debug("Found image URL: " + givenURL); break;//immediatly stop after getting URL (there should only be 1 image to be downloaded) } } //Means website changed, things need to be fixed. if (result.isEmpty()){ - logger.error("Could not find image URL at: " + url); + LOGGER.error("Could not find image URL at: " + url); } return result; @@ -176,12 +224,7 @@ public class VscoRipper extends AbstractHTMLRipper{ public Document getFirstPage() throws IOException { return Http.url(url).get(); } - - @Override - public Document getNextPage(Document doc) throws IOException { - return super.getNextPage(doc); - } - + @Override public void downloadURL(URL url, int index) { addURLToDownload(url, getPrefix(index)); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java index ce01f1cf..385f6d1f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java @@ -36,7 +36,7 @@ public class XhamsterRipper extends AbstractHTMLRipper { URLToReturn = URLToReturn.replaceAll("m.xhamster.com", "xhamster.com"); URLToReturn = URLToReturn.replaceAll("\\w\\w.xhamster.com", "xhamster.com"); URL san_url = new URL(URLToReturn.replaceAll("xhamster.com", "m.xhamster.com")); - logger.info("sanitized URL is " + san_url.toExternalForm()); + LOGGER.info("sanitized URL is " + san_url.toExternalForm()); return san_url; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java index c5dc447e..d0ca82b2 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java @@ -55,11 +55,11 @@ public class ZizkiRipper extends AbstractHTMLRipper { Element authorSpan = getFirstPage().select("span[class=creator]").first(); String author = authorSpan.select("a").first().text(); - logger.debug("Author: " + author); + LOGGER.debug("Author: " + author); return getHost() + "_" + author + "_" + title.trim(); } catch (IOException e) { // Fall back to default album naming convention - logger.info("Unable to find title at " + url); + LOGGER.info("Unable to find title at " + url); } return super.getAlbumTitle(url); } @@ -78,9 +78,9 @@ public class ZizkiRipper extends AbstractHTMLRipper 
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java
index c5dc447e..d0ca82b2 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java
@@ -55,11 +55,11 @@ public class ZizkiRipper extends AbstractHTMLRipper {
             Element authorSpan = getFirstPage().select("span[class=creator]").first();
             String author = authorSpan.select("a").first().text();
-            logger.debug("Author: " + author);
+            LOGGER.debug("Author: " + author);
             return getHost() + "_" + author + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -78,9 +78,9 @@ public class ZizkiRipper extends AbstractHTMLRipper {
     public List<String> getURLsFromPage(Document page) {
         List<String> imageURLs = new ArrayList<>();
         // Page contains images
-        logger.info("Look for images.");
+        LOGGER.info("Look for images.");
         for (Element thumb : page.select("img")) {
-            logger.info("Img");
+            LOGGER.info("Img");
             if (super.isStopped()) break;
             // Find thumbnail image source
             String image = null;
@@ -89,7 +89,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
             if (thumb.hasAttr("typeof")) {
                 img_type = thumb.attr("typeof");
                 if (img_type.equals("foaf:Image")) {
-                    logger.debug("Found image with " + img_type);
+                    LOGGER.debug("Found image with " + img_type);
                     if (thumb.parent() != null &&
                         thumb.parent().parent() != null &&
                         thumb.parent().parent().attr("class") != null &&
@@ -97,7 +97,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
                     ) {
                         src = thumb.attr("src");
-                        logger.debug("Found url with " + src);
+                        LOGGER.debug("Found url with " + src);
                         if (!src.contains("zizki.com")) {
                         } else {
                             imageURLs.add(src.replace("/styles/medium/public/","/styles/large/public/"));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java
index fb38b216..16526945 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java
@@ -55,7 +55,7 @@ public class CliphunterRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         String html = Http.url(url).get().html();
         String jsonString = html.substring(html.indexOf("var flashVars = {d: '") + 21);
         jsonString = jsonString.substring(0, jsonString.indexOf("'"));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java
index ccaaa225..75577597 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/GfycatRipper.java
@@ -65,7 +65,7 @@ public class GfycatRipper extends VideoRipper {
      * @throws IOException
      */
     public static String getVideoURL(URL url) throws IOException {
-        logger.info("Retrieving " + url.toExternalForm());
+        LOGGER.info("Retrieving " + url.toExternalForm());
 
         //Sanitize the URL first
         url = new URL(url.toExternalForm().replace("/gifs/detail", ""));
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java
index 9a2e47b1..6af8840b 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/MotherlessVideoRipper.java
@@ -52,10 +52,10 @@ public class MotherlessVideoRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url);
+        LOGGER.info(" Retrieving " + this.url);
         String html = Http.url(this.url).get().toString();
         if (html.contains("__fileurl = '")) {
-            logger.error("WTF");
+            LOGGER.error("WTF");
         }
         List<String> vidUrls = Utils.between(html, "__fileurl = '", "';");
         if (vidUrls.isEmpty()) {
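The MotherlessVideoRipper hunk above leans on Utils.between to pull the video URL out of an inline script. A hypothetical reimplementation of such a helper (not RipMe's actual code), with the call from the diff as the usage example:

    import java.util.ArrayList;
    import java.util.List;

    public class BetweenSketch {
        // Return every substring of `source` found between `start` and `end`.
        static List<String> between(String source, String start, String end) {
            List<String> result = new ArrayList<>();
            int i = source.indexOf(start);
            while (i >= 0) {
                i += start.length();
                int j = source.indexOf(end, i);
                if (j < 0) break;
                result.add(source.substring(i, j));
                i = source.indexOf(start, j + end.length());
            }
            return result;
        }

        public static void main(String[] args) {
            String html = "var x; __fileurl = 'https://example.com/video.mp4'; more";
            // Mirrors the call in MotherlessVideoRipper.rip()
            System.out.println(between(html, "__fileurl = '", "';")); // [https://example.com/video.mp4]
        }
    }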
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java
index ea98bcfd..c5489870 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java
@@ -54,7 +54,7 @@ public class PornhubRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url.toExternalForm());
+        LOGGER.info(" Retrieving " + this.url.toExternalForm());
         Document doc = Http.url(this.url).get();
         String html = doc.body().html();
         Pattern p = Pattern.compile("^.*flashvars_[0-9]+ = (.+});.*$", Pattern.DOTALL);
@@ -81,10 +81,10 @@ public class PornhubRipper extends VideoRipper {
             }
             addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url));
         } catch (JSONException e) {
-            logger.error("Error while parsing JSON at " + url, e);
+            LOGGER.error("Error while parsing JSON at " + url, e);
             throw e;
         } catch (Exception e) {
-            logger.error("Error while retrieving video URL at " + url, e);
+            LOGGER.error("Error while retrieving video URL at " + url, e);
             throw new IOException(e);
         }
     }
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java
index 8a483066..d977708a 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java
@@ -55,7 +55,7 @@ public class TwitchVideoRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
 
         //Get user friendly filename from page title
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java
index f36d7ce4..078b32a5 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java
@@ -53,7 +53,7 @@ public class ViddmeRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url.toExternalForm());
+        LOGGER.info(" Retrieving " + this.url.toExternalForm());
         Document doc = Http.url(this.url).get();
         Elements videos = doc.select("meta[name=twitter:player:stream]");
         if (videos.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java
index bae7a965..052b2cbe 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java
@@ -54,7 +54,7 @@ public class VidearnRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         List<String> mp4s = Utils.between(doc.html(), "file:\"", "\"");
         if (mp4s.isEmpty()) {
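For context on the PornhubRipper hunk: the flashvars extraction it preserves uses a DOTALL regex to capture the inline JSON object assigned to flashvars_<id>. A standalone sketch of that parsing step (the sample script text and field name are made up for illustration):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;
    import org.json.JSONObject;

    public class FlashvarsSketch {
        public static void main(String[] args) {
            // Hypothetical page fragment; real pages embed a much larger object.
            String html = "player setup... var flashvars_12345 = {\"video_url\":\"https://example.com/v.mp4\"}; loadPlayer();";
            // Same pattern as in PornhubRipper.rip(): DOTALL lets .* span newlines,
            // and group 1 captures everything up to the closing "};".
            Pattern p = Pattern.compile("^.*flashvars_[0-9]+ = (.+});.*$", Pattern.DOTALL);
            Matcher m = p.matcher(html);
            if (m.matches()) {
                JSONObject flashvars = new JSONObject(m.group(1));
                System.out.println(flashvars.getString("video_url")); // https://example.com/v.mp4
            }
        }
    }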
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java
index d2931b0b..1ca59676 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VineRipper.java
@@ -54,7 +54,7 @@ public class VineRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url.toExternalForm());
+        LOGGER.info(" Retrieving " + this.url.toExternalForm());
         Document doc = Http.url(this.url).get();
         Elements props = doc.select("meta[property=twitter:player:stream]");
         if (props.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java
index c610a470..70528727 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java
@@ -52,7 +52,7 @@ public class VkRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url);
+        LOGGER.info(" Retrieving " + this.url);
         String videoURL = getVideoURLAtPage(this.url.toExternalForm());
         addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url));
         waitForThreads();
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java
index 09df1c8d..9043bfeb 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XhamsterRipper.java
@@ -54,7 +54,7 @@ public class XhamsterRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         Elements videos = doc.select("div.player-container > a");
         if (videos.isEmpty()) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
index 6dde798d..ef71a4bb 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/XvideosRipper.java
@@ -54,12 +54,12 @@ public class XvideosRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url);
+        LOGGER.info(" Retrieving " + this.url);
         Document doc = Http.url(this.url).get();
         Elements scripts = doc.select("script");
         for (Element e : scripts) {
             if (e.html().contains("html5player.setVideoUrlHigh")) {
-                logger.info("Found the right script");
+                LOGGER.info("Found the right script");
                 String[] lines = e.html().split("\n");
                 for (String line: lines) {
                     if (line.contains("html5player.setVideoUrlHigh")) {
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java
index 0c87f175..2399ea87 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YoupornRipper.java
@@ -54,7 +54,7 @@ public class YoupornRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url);
+        LOGGER.info(" Retrieving " + this.url);
         Document doc = Http.url(this.url).get();
         Elements videos = doc.select("video");
         if (videos.isEmpty()) {
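The XvideosRipper hunk walks every script element looking for the html5player.setVideoUrlHigh call, then re-scans the matching script line by line. A self-contained jsoup sketch of that scan over a made-up page (the quoted-argument extraction is simplified relative to the ripper's own parsing):

    import org.jsoup.Jsoup;
    import org.jsoup.nodes.Document;
    import org.jsoup.nodes.Element;

    public class ScriptScanSketch {
        public static void main(String[] args) {
            // Hypothetical stand-in for the fetched page.
            Document doc = Jsoup.parse(
                "<html><script>var a = 1;</script>"
                + "<script>html5player.setVideoUrlHigh('https://example.com/hi.mp4');</script></html>");
            for (Element e : doc.select("script")) {
                if (e.html().contains("html5player.setVideoUrlHigh")) {
                    // Crude extraction of the single-quoted argument.
                    String js = e.html();
                    int start = js.indexOf('\'') + 1;
                    int end = js.indexOf('\'', start);
                    System.out.println(js.substring(start, end)); // https://example.com/hi.mp4
                }
            }
        }
    }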
diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java
index 34c10947..2891efb5 100644
--- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java
@@ -55,7 +55,7 @@ public class YuvutuRipper extends VideoRipper {
     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         Element iframe = doc.select("iframe").first();
         String iframeSrc = iframe.attr("src");
diff --git a/src/main/java/com/rarchives/ripme/ui/MainWindow.java b/src/main/java/com/rarchives/ripme/ui/MainWindow.java
index 8d881fdf..0193aa08 100644
--- a/src/main/java/com/rarchives/ripme/ui/MainWindow.java
+++ b/src/main/java/com/rarchives/ripme/ui/MainWindow.java
@@ -66,7 +66,7 @@ import javax.swing.UnsupportedLookAndFeelException;
  */
 public final class MainWindow implements Runnable, RipStatusHandler {
 
-    private static final Logger logger = Logger.getLogger(MainWindow.class);
+    private static final Logger LOGGER = Logger.getLogger(MainWindow.class);
 
     private boolean isRipping = false; // Flag to indicate if we're ripping something
@@ -279,7 +279,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         try {
             UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
         } catch (ClassNotFoundException | InstantiationException | UnsupportedLookAndFeelException | IllegalAccessException e) {
-            logger.error("[!] Exception setting system theme:", e);
+            LOGGER.error("[!] Exception setting system theme:", e);
         }
 
         ripTextfield = new JTextField("", 20);
@@ -821,7 +821,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                 try {
                     chosenPath = chosenFile.getCanonicalPath();
                 } catch (Exception e) {
-                    logger.error("Error while getting selected path: ", e);
+                    LOGGER.error("Error while getting selected path: ", e);
                     return;
                 }
                 configSaveDirLabel.setText(Utils.shortenPath(chosenPath));
@@ -877,7 +877,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                 break;
             }
             Logger.getRootLogger().setLevel(newLevel);
-            logger.setLevel(newLevel);
+            LOGGER.setLevel(newLevel);
             ConsoleAppender ca = (ConsoleAppender)Logger.getRootLogger().getAppender("stdout");
             if (ca != null) {
                 ca.setThreshold(newLevel);
@@ -951,7 +951,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                 try {
                     Desktop.getDesktop().browse(URI.create("http://github.com/ripmeapp/ripme"));
                 } catch (IOException e) {
-                    logger.error("Exception while opening project home page", e);
+                    LOGGER.error("Exception while opening project home page", e);
                 }
             }
         });
@@ -1024,10 +1024,10 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         HISTORY.clear();
         if (historyFile.exists()) {
             try {
-                logger.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
+                LOGGER.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
                 HISTORY.fromFile(historyFile.getCanonicalPath());
             } catch (IOException e) {
-                logger.error("Failed to load history from file " + historyFile, e);
+                LOGGER.error("Failed to load history from file " + historyFile, e);
                 JOptionPane.showMessageDialog(null,
                         "RipMe failed to load the history file at " + historyFile.getAbsolutePath() + "\n\n" +
                         "Error: " + e.getMessage() + "\n\n" +
                         JOptionPane.ERROR_MESSAGE);
             }
         } else {
-            logger.info(rb.getString("loading.history.from.configuration"));
+            LOGGER.info(rb.getString("loading.history.from.configuration"));
             HISTORY.fromList(Utils.getConfigList("download.history"));
             if (HISTORY.toList().isEmpty()) {
                 // Loaded from config, still no entries.
@@ -1067,7 +1067,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             HISTORY.toFile(historyFile.toString());
             Utils.setConfigList("download.history", Collections.emptyList());
         } catch (IOException e) {
-            logger.error("Failed to save history to file " + historyFile, e);
+            LOGGER.error("Failed to save history to file " + historyFile, e);
         }
     }
@@ -1089,7 +1089,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             try {
                 Thread.sleep(500);
             } catch (InterruptedException ie) {
-                logger.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
+                LOGGER.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
             }
             ripNextAlbum();
         } else {
@@ -1113,7 +1113,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         try {
             url = new URL(urlString);
         } catch (MalformedURLException e) {
-            logger.error("[!] Could not generate URL for '" + urlString + "'", e);
+            LOGGER.error("[!] Could not generate URL for '" + urlString + "'", e);
             error("Given URL is not valid, expecting http://website.com/page/...");
             return null;
         }
@@ -1128,7 +1128,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             ripper.setup();
         } catch (Exception e) {
             failed = true;
-            logger.error("Could not find ripper for URL " + url, e);
+            LOGGER.error("Could not find ripper for URL " + url, e);
             error(e.getMessage());
         }
         if (!failed) {
@@ -1146,7 +1146,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             }
             return t;
         } catch (Exception e) {
-            logger.error("[!] Error while ripping: " + e.getMessage(), e);
+            LOGGER.error("[!] Error while ripping: " + e.getMessage(), e);
             error("Unable to rip this URL: " + e.getMessage());
         }
     }
@@ -1197,28 +1197,28 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         switch(msg.getStatus()) {
         case LOADING_RESOURCE:
         case DOWNLOAD_STARTED:
-            if (logger.isEnabledFor(Level.INFO)) {
+            if (LOGGER.isEnabledFor(Level.INFO)) {
                 appendLog("Downloading " + msg.getObject(), Color.BLACK);
             }
             break;
         case DOWNLOAD_COMPLETE:
-            if (logger.isEnabledFor(Level.INFO)) {
+            if (LOGGER.isEnabledFor(Level.INFO)) {
                 appendLog("Downloaded " + msg.getObject(), Color.GREEN);
             }
             break;
         case DOWNLOAD_ERRORED:
-            if (logger.isEnabledFor(Level.ERROR)) {
+            if (LOGGER.isEnabledFor(Level.ERROR)) {
                 appendLog((String) msg.getObject(), Color.RED);
             }
             break;
         case DOWNLOAD_WARN:
-            if (logger.isEnabledFor(Level.WARN)) {
+            if (LOGGER.isEnabledFor(Level.WARN)) {
                 appendLog((String) msg.getObject(), Color.ORANGE);
             }
             break;
         case RIP_ERRORED:
-            if (logger.isEnabledFor(Level.ERROR)) {
+            if (LOGGER.isEnabledFor(Level.ERROR)) {
                 appendLog((String) msg.getObject(), Color.RED);
             }
             stopButton.setEnabled(false);
@@ -1270,7 +1270,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             try {
                 Desktop.getDesktop().open(new File(event.getActionCommand()));
             } catch (Exception e) {
-                logger.error(e);
+                LOGGER.error(e);
             }
         });
         pack();
@@ -1341,7 +1341,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         Utils.setConfigInteger("window.x", x);
         Utils.setConfigInteger("window.y", y);
         Utils.setConfigInteger("window.w", w);
         Utils.setConfigInteger("window.h", h);
-        logger.debug("Saved window position (x=" + x + ", y=" + y + ", w=" + w + ", h=" + h + ")");
+        LOGGER.debug("Saved window position (x=" + x + ", y=" + y + ", w=" + w + ", h=" + h + ")");
     }
 
     private static void restoreWindowPosition(Frame frame) {
@@ -1356,7 +1356,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         int w = Utils.getConfigInteger("window.w", -1);
         int h = Utils.getConfigInteger("window.h", -1);
         if (x < 0 || y < 0 || w <= 0 || h <= 0) {
-            logger.debug("UNUSUAL: One or more of: x, y, w, or h was still less than 0 after reading config");
+            LOGGER.debug("UNUSUAL: One or more of: x, y, w, or h was still less than 0 after reading config");
             mainFrame.setLocationRelativeTo(null); // default to middle of screen
             return;
         }
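The MainWindow hunks above are pure logger renames, but the geometry code they touch shows a useful pattern: window position is persisted with Utils.setConfigInteger and read back with a -1 default, so a missing or nonsensical key falls through to centering the frame. A minimal sketch of that sentinel pattern, with java.util.Properties standing in for RipMe's Utils (an assumption for illustration, not the project's actual storage mechanism):

    import java.util.Properties;

    public class WindowConfigSketch {
        private static final Properties CONFIG = new Properties();

        // Stand-ins for Utils.setConfigInteger / Utils.getConfigInteger.
        static void setConfigInteger(String key, int value) {
            CONFIG.setProperty(key, Integer.toString(value));
        }

        static int getConfigInteger(String key, int defaultValue) {
            String v = CONFIG.getProperty(key);
            return v == null ? defaultValue : Integer.parseInt(v);
        }

        public static void main(String[] args) {
            // Nothing saved yet: the -1 sentinel comes back, which is the case
            // where MainWindow centers the frame instead of restoring it.
            int x = getConfigInteger("window.x", -1);
            int w = getConfigInteger("window.w", -1);
            System.out.println((x < 0 || w <= 0) ? "center window" : "restore at x=" + x + ", w=" + w);

            setConfigInteger("window.x", 120);
            setConfigInteger("window.w", 800);
            System.out.println(getConfigInteger("window.x", -1)); // 120
        }
    }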
+ ").append(change); - } + + String changeList = getChangeList(ripmeJson); + + logger.info("Change log: \n" + changeList); String latestVersion = ripmeJson.getString("latestVersion"); if (UpdateUtils.isNewerVersion(latestVersion)) { @@ -111,15 +120,8 @@ public class UpdateUtils { } String jsonString = doc.body().html().replaceAll(""", "\""); ripmeJson = new JSONObject(jsonString); - JSONArray jsonChangeList = ripmeJson.getJSONArray("changeList"); - StringBuilder changeList = new StringBuilder(); - for (int i = 0; i < jsonChangeList.length(); i++) { - String change = jsonChangeList.getString(i); - if (change.startsWith(UpdateUtils.getThisJarVersion() + ":")) { - break; - } - changeList.append("
+ ").append(change); - } + + String changeList = getChangeList(ripmeJson); String latestVersion = ripmeJson.getString("latestVersion"); if (UpdateUtils.isNewerVersion(latestVersion)) { @@ -127,7 +129,7 @@ public class UpdateUtils { int result = JOptionPane.showConfirmDialog( null, "New version (" + latestVersion + ") is available!" - + "

Recent changes:" + changeList.toString() + + "

Recent changes:" + changeList + "

Do you want to download and run the newest version?", "RipMe Updater", JOptionPane.YES_NO_OPTION); @@ -157,6 +159,11 @@ public class UpdateUtils { } private static boolean isNewerVersion(String latestVersion) { + // If we're testing the update utils we want the program to always try to update + if (Utils.getConfigBoolean("testing.always_try_to_update", false)) { + logger.info("isNewerVersion is returning true because the key \"testing.always_try_to_update\" is true"); + return true; + } int[] oldVersions = versionStringToInt(getThisJarVersion()); int[] newVersions = versionStringToInt(latestVersion); if (oldVersions.length < newVersions.length) { @@ -227,17 +234,20 @@ public class UpdateUtils { try (FileOutputStream out = new FileOutputStream(updateFileName)) { out.write(response.bodyAsBytes()); } - String updateHash = createSha256(new File(updateFileName)); - logger.info("Download of new version complete; saved to " + updateFileName); - logger.info("Checking hash of update"); + // Only check the hash if the user hasn't disabled hash checking + if (Utils.getConfigBoolean("security.check_update_hash", true)) { + String updateHash = createSha256(new File(updateFileName)); + logger.info("Download of new version complete; saved to " + updateFileName); + logger.info("Checking hash of update"); - if (!ripmeJson.getString("currentHash").equals(updateHash)) { - logger.error("Error: Update has bad hash"); - logger.debug("Expected hash: " + ripmeJson.getString("currentHash")); - logger.debug("Actual hash: " + updateHash); - throw new IOException("Got bad file hash"); - } else { - logger.info("Hash is good"); + if (!ripmeJson.getString("currentHash").equals(updateHash)) { + logger.error("Error: Update has bad hash"); + logger.debug("Expected hash: " + ripmeJson.getString("currentHash")); + logger.debug("Actual hash: " + updateHash); + throw new IOException("Got bad file hash"); + } else { + logger.info("Hash is good"); + } } if (shouldLaunch) { // Setup updater script
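Two notes on the UpdateUtils changes above. First, the extracted getChangeList walks ripme.json's changeList from newest to oldest and stops at the entry for the running version, so the user only sees changes newer than their build. A standalone sketch with a made-up changeList (the entry text is illustrative, not the real ripme.json):

    import org.json.JSONArray;
    import org.json.JSONObject;

    public class ChangeListSketch {
        // Pretend the running jar is 1.7.50 and the remote advertises 1.7.51.
        static final String THIS_VERSION = "1.7.50";

        static String getChangeList(JSONObject rj) {
            JSONArray jsonChangeList = rj.getJSONArray("changeList");
            StringBuilder changeList = new StringBuilder();
            for (int i = 0; i < jsonChangeList.length(); i++) {
                String change = jsonChangeList.getString(i);
                // Entries are newest-first and prefixed "version:", so stop once
                // we reach the version we are already running.
                if (change.startsWith(THIS_VERSION + ":")) {
                    break;
                }
                changeList.append("\n").append(change);
            }
            return changeList.toString();
        }

        public static void main(String[] args) {
            JSONObject rj = new JSONObject()
                    .put("latestVersion", "1.7.51")
                    .put("changeList", new JSONArray()
                            .put("1.7.51: Example change entry")   // shown
                            .put("1.7.50: Example older entry"));  // loop stops here
            System.out.println(getChangeList(rj)); // prints only the 1.7.51 entry
        }
    }

Second, the new security.check_update_hash gate wraps the existing createSha256 comparison against ripme.json's currentHash. createSha256 itself is not shown in this hunk; a plausible equivalent (an assumption, not necessarily the project's exact code) is:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class Sha256Sketch {
        // Hex-encoded SHA-256 of a file, as the update check compares against currentHash.
        static String createSha256(File file) throws IOException, NoSuchAlgorithmException {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            try (FileInputStream in = new FileInputStream(file)) {
                byte[] buf = new byte[8192];
                int n;
                while ((n = in.read(buf)) > 0) {
                    md.update(buf, 0, n);
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md.digest()) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();
        }
    }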