commit 7ab9cbb230

@@ -37,6 +37,7 @@ For information about running the `.jar` file, see [the How To Run wiki](https:/
 * Built in updater
 * Can rip images from tumblr in the size they were uploaded in [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#tumblrget_raw_image)
 * Skips already downloaded images by default
+* Can auto skip e-hentai and nhentai albums containing certain tags [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#nhentaiblacklisttags)
 
 ## [List of Supported Sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites)
 
@@ -1 +1,2 @@
 mvn clean compile assembly:single
+mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar
build.sh
@@ -1,2 +1,4 @@
 #!/usr/bin/env bash
 mvn clean compile assembly:single
+# Strip the jar of any non-reproducible metadata such as timestamps
+mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar
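The point of the strip-jar step is reproducibility: rebuilding the same source should yield a byte-identical jar, which is what lets ripme.json pin a single expected hash. A minimal sketch of checking that property (Python; the jar path is a hypothetical example following the naming patch.py uses below):

from hashlib import sha256

def jar_hash(path):
    # Hash the jar the same way patch.py does: sha256 over the raw bytes.
    with open(path, "rb") as f:
        return sha256(f.read()).hexdigest()

# Hypothetical path; two builds of the same commit should print the same digest.
print(jar_hash("target/ripme-1.7.51-jar-with-dependencies.jar"))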
patch.py
@@ -12,11 +12,13 @@ from hashlib import sha256
 
 message = input('message: ')
 
+
 def get_ripme_json():
     with open('ripme.json') as dataFile:
         ripmeJson = json.load(dataFile)
     return ripmeJson
 
+
 def update_hash(current_hash):
     ripmeJson = get_ripme_json()
     with open('ripme.json', 'w') as dataFile:
@@ -24,6 +26,7 @@ def update_hash(current_hash):
         print(ripmeJson["currentHash"])
         json.dump(ripmeJson, dataFile, indent=4)
 
+
 def update_change_list(message):
     ripmeJson = get_ripme_json()
     with open('ripme.json', 'w') as dataFile:
@@ -72,11 +75,10 @@ dataFile = open("ripme.json", "w")
 dataFile.write(outputContent)
 dataFile.close()
 
-subprocess.call(['git', 'add', '-u'])
-subprocess.call(['git', 'commit', '-m', commitMessage])
-subprocess.call(['git', 'tag', nextVersion])
 print("Building ripme")
 subprocess.call(["mvn", "clean", "compile", "assembly:single"])
+print("Stripping jar")
+subprocess.call(["mvn", "io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar"])
 print("Hashing .jar file")
 openedFile = open("./target/ripme-{}-jar-with-dependencies.jar".format(nextVersion), "rb")
 readFile = openedFile.read()
@@ -84,3 +86,6 @@ file_hash = sha256(readFile).hexdigest()
 print("Hash is: {}".format(file_hash))
 print("Updating hash")
 update_hash(file_hash)
+subprocess.call(['git', 'add', '-u'])
+subprocess.call(['git', 'commit', '-m', commitMessage])
+subprocess.call(['git', 'tag', nextVersion])
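Note the reordering in patch.py above: the build, strip, and hash steps now run before git add/commit/tag, so the tagged release commit already contains the updated hash in ripme.json. A condensed sketch of the resulting tail of the script (update_hash, commitMessage, and nextVersion are defined earlier in patch.py):

import subprocess
from hashlib import sha256

subprocess.call(["mvn", "clean", "compile", "assembly:single"])
subprocess.call(["mvn", "io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar"])
with open("./target/ripme-{}-jar-with-dependencies.jar".format(nextVersion), "rb") as f:
    update_hash(sha256(f.read()).hexdigest())  # write the new hash into ripme.json
subprocess.call(['git', 'add', '-u'])
subprocess.call(['git', 'commit', '-m', commitMessage])
subprocess.call(['git', 'tag', nextVersion])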
pom.xml
@@ -4,7 +4,7 @@
     <groupId>com.rarchives.ripme</groupId>
     <artifactId>ripme</artifactId>
     <packaging>jar</packaging>
-    <version>1.7.50</version>
+    <version>1.7.51</version>
     <name>ripme</name>
     <url>http://rip.rarchives.com</url>
     <properties>
@@ -61,6 +61,11 @@
     </dependencies>
     <build>
         <plugins>
+            <plugin>
+                <groupId>io.github.zlika</groupId>
+                <artifactId>reproducible-build-maven-plugin</artifactId>
+                <version>0.6</version>
+            </plugin>
             <plugin>
                 <artifactId>maven-assembly-plugin</artifactId>
                 <configuration>
release.py (new file)
@@ -0,0 +1,96 @@
+#!/usr/bin/python3
+
+import re
+
+import os
+
+import sys
+from hashlib import sha256
+from github import Github
+import json
+import argparse
+
+parser = argparse.ArgumentParser(description="Make a new ripme release on github")
+parser.add_argument("-f", "--file", help="Path to the version of ripme to release")
+parser.add_argument("-t", "--token", help="Your github personal access token")
+parser.add_argument("-d", "--debug", help="Run in debug mode", action="store_true")
+parser.add_argument("-n", "--non-interactive", help="Do not ask for any input from the user", action="store_true")
+args = parser.parse_args()
+
+try:
+    input = raw_input
+except NameError:
+    pass
+
+
+# Make sure the file the user selected is a jar
+def isJar(filename):
+    if debug:
+        print("Checking if {} is a jar file".format(filename))
+    return filename.endswith("jar")
+
+
+# Returns true if the latest entry in the "changeList" section of ripme.json is in the format of $number.$number.$number: and
+# false if not
+def isValidCommitMessage(message):
+    if debug:
+        print("Checking if {} matches pattern ^\d+\.\d+\.\d+:".format(message))
+    pattern = re.compile("^\d+\.\d+\.\d+:")
+    return re.match(pattern, message)
+
+
+ripmeJson = json.loads(open("ripme.json").read())
+fileToUploadPath = args.file
+InNoninteractiveMode = args.non_interactive
+commitMessage = ripmeJson.get("changeList")[0]
+releaseVersion = ripmeJson.get("latestVersion")
+debug = args.debug
+accessToken = args.token
+repoOwner = "ripmeapp"
+repoName = "ripme"
+
+if not os.path.isfile(fileToUploadPath):
+    print("[!] Error: {} does not exist".format(fileToUploadPath))
+    sys.exit(1)
+
+if not isJar(fileToUploadPath):
+    print("[!] Error: {} is not a jar file!".format(fileToUploadPath))
+    sys.exit(1)
+
+if not isValidCommitMessage(commitMessage):
+    print("[!] Error: {} is not a valid commit message as it does not start with a version".format(commitMessage))
+    sys.exit(1)
+
+ripmeUpdate = open(fileToUploadPath, mode='rb').read()
+
+# The hash that we expect the update to have
+expectedHash = ripmeJson.get("currentHash")
+
+# The actual hash of the file on disk
+actualHash = sha256(ripmeUpdate).hexdigest()
+
+# Make sure that the hash of the file we're uploading matches the hash in ripme.json. These hashes not matching will
+# cause ripme to refuse to install the update for all users who haven't disabled update hash checking
+if expectedHash != actualHash:
+    print("[!] Error: expected hash of file and actual hash differ")
+    print("[!] Expected hash is {}".format(expectedHash))
+    print("[!] Actual hash is {}".format(actualHash))
+    sys.exit(1)
+
+# Ask the user to review the information before we proceed
+# This only runs if we're in interactive mode
+if not InNoninteractiveMode:
+    print("File path: {}".format(fileToUploadPath))
+    print("Release title: {}".format(commitMessage))
+    print("Repo: {}/{}".format(repoOwner, repoName))
+    input("\nPlease review the information above and ensure it is correct and then press enter")
+
+print("Accessing github using token")
+g = Github(accessToken)
+
+
+print("Creating release")
+release = g.get_user(repoOwner).get_repo(repoName).create_git_release(releaseVersion, commitMessage, "")
+
+print("Uploading file")
+release.upload_asset(fileToUploadPath, "ripme.jar")
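release.py refuses to publish unless the newest changeList entry begins with a version string, since that entry doubles as the release title. A quick illustration of the pattern it compiles (example inputs, not from the repo):

import re

pattern = re.compile(r"^\d+\.\d+\.\d+:")
print(bool(pattern.match("1.7.51: Fixed instagram ripper")))  # True
print(bool(pattern.match("Fixed instagram ripper")))          # False

Given its argparse setup, a typical invocation would look something like python3 release.py -f target/ripme-1.7.51-jar-with-dependencies.jar -t <token> (hypothetical example).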
@@ -1,6 +1,7 @@
 {
-    "currentHash": "f6e1e6c931abfbeffdd37dabb65f83e4335ca11ccc017f31e1d835ee6e6bec7a",
+    "currentHash": "aadb71bf5cdf46fe92e270b50a55c8d8d7200a6dd304a4c2ac9f68cddc687d7e",
     "changeList": [
+        "1.7.51: Fixed instagram ripper; Added the ability to rip from vsco profiles; Fixed TheyiffgalleryRipper; Can now update ripme using the -j flag; added script to automate releases; Code style fixes",
         "1.7.50: Ripme now checks file hash before running update; fixed update bug which caused ripme to report every update as new",
         "1.7.49: Fixed -n flag; Added ability to change locale at runtime and from gui; Update kr_KR translation; Removed support for tnbtu.com; No longer writes url to url_history file if save urls only is checked",
         "1.7.48: Fixed instagram ripper; Added Korean translation; Added quickQueue support to nhentairipper; Rewrote nhentairipper to be faster; myhentaicomics ripper now requests proper url when downloading images; Can now include comments in url files; Added the ability to blacklist tags on e-hentai.org",
@@ -222,5 +223,5 @@
         "1.0.3: Added VK.com ripper",
         "1.0.1: Added auto-update functionality"
     ],
-    "latestVersion": "1.7.50"
+    "latestVersion": "1.7.51"
 }
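With ripme.json carrying both latestVersion and the jar's currentHash, a client can decide whether an update exists before fetching it. A hedged sketch of the version comparison (hypothetical client-side code, not RipMe's actual updater):

import json

def parse_version(v):
    # "1.7.51" -> (1, 7, 51), so tuples compare in version order
    return tuple(int(part) for part in v.split("."))

with open("ripme.json") as f:
    latest = json.load(f)["latestVersion"]
if parse_version(latest) > parse_version("1.7.50"):
    print("update available:", latest)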
@@ -81,7 +81,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
     public void rip() throws IOException {
         int index = 0;
         int textindex = 0;
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
         Document doc = getFirstPage();
 
@@ -117,7 +117,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
 
             for (String imageURL : imageURLs) {
                 index += 1;
-                logger.debug("Found image url #" + index + ": " + imageURL);
+                LOGGER.debug("Found image url #" + index + ": " + imageURL);
                 downloadURL(new URL(imageURL), index);
                 if (isStopped()) {
                     break;
@@ -125,16 +125,16 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                 }
             }
             if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) {
-                logger.debug("Fetching description(s) from " + doc.location());
+                LOGGER.debug("Fetching description(s) from " + doc.location());
                 List<String> textURLs = getDescriptionsFromPage(doc);
                 if (!textURLs.isEmpty()) {
-                    logger.debug("Found description link(s) from " + doc.location());
+                    LOGGER.debug("Found description link(s) from " + doc.location());
                     for (String textURL : textURLs) {
                         if (isStopped()) {
                             break;
                         }
                         textindex += 1;
-                        logger.debug("Getting description from " + textURL);
+                        LOGGER.debug("Getting description from " + textURL);
                         String[] tempDesc = getDescription(textURL,doc);
                         if (tempDesc != null) {
                             if (Utils.getConfigBoolean("file.overwrite", false) || !(new File(
@@ -144,11 +144,11 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                                     + getPrefix(index)
                                     + (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL)))
                                     + ".txt").exists())) {
-                                logger.debug("Got description from " + textURL);
+                                LOGGER.debug("Got description from " + textURL);
                                 saveText(new URL(textURL), "", tempDesc[0], textindex, (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL))));
                                 sleep(descSleepTime());
                             } else {
-                                logger.debug("Description from " + textURL + " already exists.");
+                                LOGGER.debug("Description from " + textURL + " already exists.");
                             }
                         }
 
@@ -164,14 +164,14 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                 sendUpdate(STATUS.LOADING_RESOURCE, "next page");
                 doc = getNextPage(doc);
             } catch (IOException e) {
-                logger.info("Can't get next page: " + e.getMessage());
+                LOGGER.info("Can't get next page: " + e.getMessage());
                 break;
             }
         }
 
         // If they're using a thread pool, wait for it.
         if (getThreadPool() != null) {
-            logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+            LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
             getThreadPool().waitForThreads();
         }
         waitForThreads();
@@ -237,12 +237,12 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
             out.write(text.getBytes());
             out.close();
         } catch (IOException e) {
-            logger.error("[!] Error creating save file path for description '" + url + "':", e);
+            LOGGER.error("[!] Error creating save file path for description '" + url + "':", e);
             return false;
         }
-        logger.debug("Downloading " + url + "'s description to " + saveFileAs);
+        LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs);
         if (!saveFileAs.getParentFile().exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
             saveFileAs.getParentFile().mkdirs();
         }
         return true;
@@ -50,7 +50,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
     @Override
     public void rip() throws IOException {
         int index = 0;
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
         JSONObject json = getFirstPage();
 
@@ -79,7 +79,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
             }
 
             index += 1;
-            logger.debug("Found image url #" + index+ ": " + imageURL);
+            LOGGER.debug("Found image url #" + index+ ": " + imageURL);
             downloadURL(new URL(imageURL), index);
         }
 
@@ -91,14 +91,14 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
                 sendUpdate(STATUS.LOADING_RESOURCE, "next page");
                 json = getNextPage(json);
             } catch (IOException e) {
-                logger.info("Can't get next page: " + e.getMessage());
+                LOGGER.info("Can't get next page: " + e.getMessage());
                 break;
             }
         }
 
         // If they're using a thread pool, wait for it.
         if (getThreadPool() != null) {
-            logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+            LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
             getThreadPool().waitForThreads();
         }
         waitForThreads();
@@ -27,7 +27,7 @@ public abstract class AbstractRipper
                 extends Observable
                 implements RipperInterface, Runnable {
 
-    protected static final Logger logger = Logger.getLogger(AbstractRipper.class);
+    protected static final Logger LOGGER = Logger.getLogger(AbstractRipper.class);
     private final String URLHistoryFile = Utils.getURLHistoryFile();
 
     public static final String USER_AGENT =
@@ -77,11 +77,11 @@ public abstract class AbstractRipper
         try {
             File file = new File(URLHistoryFile);
             if (!new File(Utils.getConfigDir()).exists()) {
-                logger.error("Config dir doesn't exist");
-                logger.info("Making config dir");
+                LOGGER.error("Config dir doesn't exist");
+                LOGGER.info("Making config dir");
                 boolean couldMakeDir = new File(Utils.getConfigDir()).mkdirs();
                 if (!couldMakeDir) {
-                    logger.error("Couldn't make config dir");
+                    LOGGER.error("Couldn't make config dir");
                     return;
                 }
             }
@@ -89,12 +89,12 @@ public abstract class AbstractRipper
             if (!file.exists()) {
                 boolean couldMakeDir = file.createNewFile();
                 if (!couldMakeDir) {
-                    logger.error("Couldn't url history file");
+                    LOGGER.error("Couldn't url history file");
                     return;
                 }
             }
             if (!file.canWrite()) {
-                logger.error("Can't write to url history file: " + URLHistoryFile);
+                LOGGER.error("Can't write to url history file: " + URLHistoryFile);
                 return;
             }
             fw = new FileWriter(file.getAbsoluteFile(), true);
@@ -247,10 +247,10 @@ public abstract class AbstractRipper
         try {
             stopCheck();
         } catch (IOException e) {
-            logger.debug("Ripper has been stopped");
+            LOGGER.debug("Ripper has been stopped");
             return false;
         }
-        logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
+        LOGGER.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
         String saveAs = getFileName(url, fileName, extension);
         File saveFileAs;
         try {
@@ -265,19 +265,19 @@ public abstract class AbstractRipper
                     + prefix
                     + saveAs);
         } catch (IOException e) {
-            logger.error("[!] Error creating save file path for URL '" + url + "':", e);
+            LOGGER.error("[!] Error creating save file path for URL '" + url + "':", e);
             return false;
         }
-        logger.debug("Downloading " + url + " to " + saveFileAs);
+        LOGGER.debug("Downloading " + url + " to " + saveFileAs);
         if (!saveFileAs.getParentFile().exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
             saveFileAs.getParentFile().mkdirs();
         }
         if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
             try {
                 writeDownloadedURL(url.toExternalForm() + "\n");
             } catch (IOException e) {
-                logger.debug("Unable to write URL history file");
+                LOGGER.debug("Unable to write URL history file");
             }
         }
         return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME);
@@ -357,7 +357,7 @@ public abstract class AbstractRipper
      * Waits for downloading threads to complete.
      */
     protected void waitForThreads() {
-        logger.debug("Waiting for threads to finish");
+        LOGGER.debug("Waiting for threads to finish");
         completed = false;
         threadPool.waitForThreads();
         checkIfComplete();
@@ -409,13 +409,13 @@ public abstract class AbstractRipper
      */
     void checkIfComplete() {
         if (observer == null) {
-            logger.debug("observer is null");
+            LOGGER.debug("observer is null");
            return;
         }
 
         if (!completed) {
             completed = true;
-            logger.info(" Rip completed!");
+            LOGGER.info(" Rip completed!");
 
             RipStatusComplete rsc = new RipStatusComplete(workingDir, getCount());
             RipStatusMessage msg = new RipStatusMessage(STATUS.RIP_COMPLETE, rsc);
@@ -424,7 +424,7 @@ public abstract class AbstractRipper
             Logger rootLogger = Logger.getRootLogger();
             FileAppender fa = (FileAppender) rootLogger.getAppender("FILE");
             if (fa != null) {
-                logger.debug("Changing log file back to 'ripme.log'");
+                LOGGER.debug("Changing log file back to 'ripme.log'");
                 fa.setFile("ripme.log");
                 fa.activateOptions();
             }
@@ -433,7 +433,7 @@ public abstract class AbstractRipper
             try {
                 Desktop.getDesktop().open(new File(urlFile));
             } catch (IOException e) {
-                logger.warn("Error while opening " + urlFile, e);
+                LOGGER.warn("Error while opening " + urlFile, e);
             }
         }
     }
@@ -488,7 +488,7 @@ public abstract class AbstractRipper
         for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers")) {
             try {
                 AlbumRipper ripper = (AlbumRipper) constructor.newInstance(url); // by design: can throw ClassCastException
-                logger.debug("Found album ripper: " + ripper.getClass().getName());
+                LOGGER.debug("Found album ripper: " + ripper.getClass().getName());
                 return ripper;
             } catch (Exception e) {
                 // Incompatible rippers *will* throw exceptions during instantiation.
@@ -497,7 +497,7 @@ public abstract class AbstractRipper
         for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers.video")) {
             try {
                 VideoRipper ripper = (VideoRipper) constructor.newInstance(url); // by design: can throw ClassCastException
-                logger.debug("Found video ripper: " + ripper.getClass().getName());
+                LOGGER.debug("Found video ripper: " + ripper.getClass().getName());
                 return ripper;
             } catch (Exception e) {
                 // Incompatible rippers *will* throw exceptions during instantiation.
@@ -554,11 +554,11 @@ public abstract class AbstractRipper
         try {
             rip();
         } catch (HttpStatusException e) {
-            logger.error("Got exception while running ripper:", e);
+            LOGGER.error("Got exception while running ripper:", e);
             waitForThreads();
             sendUpdate(STATUS.RIP_ERRORED, "HTTP status code " + e.getStatusCode() + " for URL " + e.getUrl());
         } catch (Exception e) {
-            logger.error("Got exception while running ripper:", e);
+            LOGGER.error("Got exception while running ripper:", e);
             waitForThreads();
             sendUpdate(STATUS.RIP_ERRORED, e.getMessage());
         } finally {
@@ -571,10 +571,10 @@ public abstract class AbstractRipper
     private void cleanup() {
         if (this.workingDir.list().length == 0) {
             // No files, delete the dir
-            logger.info("Deleting empty directory " + this.workingDir);
+            LOGGER.info("Deleting empty directory " + this.workingDir);
             boolean deleteResult = this.workingDir.delete();
             if (!deleteResult) {
-                logger.error("Unable to delete empty directory " + this.workingDir);
+                LOGGER.error("Unable to delete empty directory " + this.workingDir);
             }
         }
     }
@@ -589,11 +589,11 @@ public abstract class AbstractRipper
      */
     protected boolean sleep(int milliseconds) {
         try {
-            logger.debug("Sleeping " + milliseconds + "ms");
+            LOGGER.debug("Sleeping " + milliseconds + "ms");
             Thread.sleep(milliseconds);
             return true;
         } catch (InterruptedException e) {
-            logger.error("Interrupted while waiting to load next page", e);
+            LOGGER.error("Interrupted while waiting to load next page", e);
             return false;
         }
     }
@@ -607,7 +607,7 @@ public abstract class AbstractRipper
 
     /** Methods for detecting when we're running a test. */
     public void markAsTest() {
-        logger.debug("THIS IS A TEST RIP");
+        LOGGER.debug("THIS IS A TEST RIP");
         thisIsATest = true;
     }
     protected boolean isThisATest() {
@@ -62,7 +62,7 @@ public abstract class AlbumRipper extends AbstractRipper {
                 || itemsCompleted.containsKey(url)
                 || itemsErrored.containsKey(url) )) {
             // Item is already downloaded/downloading, skip it.
-            logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
+            LOGGER.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
             return false;
         }
         if (Utils.getConfigBoolean("urls_only.save", false)) {
@@ -76,7 +76,7 @@ public abstract class AlbumRipper extends AbstractRipper {
                 itemsCompleted.put(url, new File(urlFile));
                 observer.update(this, msg);
             } catch (IOException e) {
-                logger.error("Error while writing to " + urlFile, e);
+                LOGGER.error("Error while writing to " + urlFile, e);
             }
         }
         else {
@@ -128,7 +128,7 @@ public abstract class AlbumRipper extends AbstractRipper {
 
             checkIfComplete();
         } catch (Exception e) {
-            logger.error("Exception while updating observer: ", e);
+            LOGGER.error("Exception while updating observer: ", e);
         }
     }
 
@@ -196,7 +196,7 @@ public abstract class AlbumRipper extends AbstractRipper {
         } else {
             title = super.getAlbumTitle(this.url);
         }
-        logger.debug("Using album title '" + title + "'");
+        LOGGER.debug("Using album title '" + title + "'");
 
         title = Utils.filesystemSafe(title);
         path += title;
@@ -204,10 +204,10 @@ public abstract class AlbumRipper extends AbstractRipper {
 
         this.workingDir = new File(path);
         if (!this.workingDir.exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
             this.workingDir.mkdirs();
         }
-        logger.debug("Set working directory to: " + this.workingDir);
+        LOGGER.debug("Set working directory to: " + this.workingDir);
     }
 
     /**
@@ -1,5 +1,9 @@
 package com.rarchives.ripme.ripper;
 
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
@@ -7,29 +11,27 @@ import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.Map;
 
-import com.rarchives.ripme.ui.RipStatusMessage;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
-import com.rarchives.ripme.utils.Utils;
-import com.sun.org.apache.xpath.internal.operations.Bool;
-
 public abstract class VideoRipper extends AbstractRipper {
 
-    private int bytesTotal = 1,
-            bytesCompleted = 1;
+    private int bytesTotal = 1;
+    private int bytesCompleted = 1;
 
     protected VideoRipper(URL url) throws IOException {
         super(url);
     }
 
-    public abstract boolean canRip(URL url);
     public abstract void rip() throws IOException;
 
     public abstract String getHost();
 
     public abstract String getGID(URL url) throws MalformedURLException;
 
     @Override
     public void setBytesTotal(int bytes) {
         this.bytesTotal = bytes;
     }
 
     @Override
     public void setBytesCompleted(int bytes) {
         this.bytesCompleted = bytes;
@@ -53,15 +55,14 @@ public abstract class VideoRipper extends AbstractRipper {
                 RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, urlFile);
                 observer.update(this, msg);
             } catch (IOException e) {
-                logger.error("Error while writing to " + urlFile, e);
+                LOGGER.error("Error while writing to " + urlFile, e);
                 return false;
             }
-        }
-        else {
+        } else {
             if (isThisATest()) {
                 // Tests shouldn't download the whole video
                 // Just change this.url to the download URL so the test knows we found it.
-                logger.debug("Test rip, found URL: " + url);
+                LOGGER.debug("Test rip, found URL: " + url);
                 this.url = url;
                 return true;
             }
@@ -71,34 +72,36 @@ public abstract class VideoRipper extends AbstractRipper {
         }
 
     @Override
-    public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
+    public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String, String> cookies, Boolean getFileExtFromMIME) {
         return addURLToDownload(url, saveAs);
     }
 
 
     /**
      * Creates & sets working directory based on URL.
-     * @param url
-     *     Target URL
+     *
+     * @param url Target URL
      */
     @Override
     public void setWorkingDir(URL url) throws IOException {
         String path = Utils.getWorkingDirectory().getCanonicalPath();
 
         if (!path.endsWith(File.separator)) {
             path += File.separator;
         }
 
         path += "videos" + File.separator;
-        this.workingDir = new File(path);
-        if (!this.workingDir.exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
-            this.workingDir.mkdirs();
+        workingDir = new File(path);
+
+        if (!workingDir.exists()) {
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(workingDir));
+            workingDir.mkdirs();
         }
-        logger.debug("Set working directory to: " + this.workingDir);
+
+        LOGGER.debug("Set working directory to: " + workingDir);
     }
 
     /**
-     * @return
-     *     Returns % of video done downloading.
+     * @return Returns % of video done downloading.
      */
     @Override
     public int getCompletionPercentage() {
@@ -107,16 +110,16 @@ public abstract class VideoRipper extends AbstractRipper {
 
     /**
      * Runs if download successfully completed.
-     * @param url
-     *     Target URL
-     * @param saveAs
-     *     Path to file, including filename.
+     *
+     * @param url Target URL
+     * @param saveAs Path to file, including filename.
      */
     @Override
     public void downloadCompleted(URL url, File saveAs) {
         if (observer == null) {
             return;
         }
 
         try {
             String path = Utils.removeCWD(saveAs);
             RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, path);
@@ -124,65 +127,61 @@ public abstract class VideoRipper extends AbstractRipper {
 
             checkIfComplete();
         } catch (Exception e) {
-            logger.error("Exception while updating observer: ", e);
+            LOGGER.error("Exception while updating observer: ", e);
         }
     }
 
     /**
      * Runs if the download errored somewhere.
-     * @param url
-     *     Target URL
-     * @param reason
-     *     Reason why the download failed.
+     *
+     * @param url Target URL
+     * @param reason Reason why the download failed.
      */
     @Override
     public void downloadErrored(URL url, String reason) {
         if (observer == null) {
             return;
         }
 
         observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_ERRORED, url + " : " + reason));
         checkIfComplete();
     }
 
 
     /**
      * Runs if user tries to redownload an already existing File.
-     * @param url
-     *     Target URL
-     * @param file
-     *     Existing file
+     *
+     * @param url Target URL
+     * @param file Existing file
     */
     @Override
     public void downloadExists(URL url, File file) {
         if (observer == null) {
             return;
         }
 
         observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file));
         checkIfComplete();
     }
 
     /**
      * Gets the status and changes it to a human-readable form.
-     * @return
-     *     Status of current download.
+     *
+     * @return Status of current download.
     */
     @Override
     public String getStatusText() {
-        StringBuilder sb = new StringBuilder();
-        sb.append(getCompletionPercentage())
-          .append("% ")
-          .append(" - ")
-          .append(Utils.bytesToHumanReadable(bytesCompleted))
-          .append(" / ")
-          .append(Utils.bytesToHumanReadable(bytesTotal));
-        return sb.toString();
+        return String.valueOf(getCompletionPercentage()) +
+                "% - " +
+                Utils.bytesToHumanReadable(bytesCompleted) +
+                " / " +
+                Utils.bytesToHumanReadable(bytesTotal);
     }
 
-    @Override
     /**
      * Sanitizes URL.
      * Usually just returns itself.
     */
+    @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
         return url;
     }
@@ -195,8 +194,10 @@ public abstract class VideoRipper extends AbstractRipper {
         if (observer == null) {
             return;
         }
+
         if (bytesCompleted >= bytesTotal) {
             super.checkIfComplete();
         }
     }
+
 }
@@ -9,7 +9,6 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
@@ -58,7 +57,7 @@ public class AerisdiesRipper extends AbstractHTMLRipper {
             return getHost() + "_" + getGID(url) + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -76,7 +76,7 @@ public class BatoRipper extends AbstractHTMLRipper {
             return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_");
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -113,7 +113,7 @@ public class BatoRipper extends AbstractHTMLRipper {
                 s = s.replaceAll("var prevCha = null;", "");
                 s = s.replaceAll("var nextCha = \\.*;", "");
                 String json = s.replaceAll("var images = ", "").replaceAll(";", "");
-                logger.info(s);
+                LOGGER.info(s);
                 JSONObject images = new JSONObject(json);
                 for (int i = 1; i < images.length() +1; i++) {
                     result.add(images.getString(Integer.toString(i)));
@@ -68,11 +68,11 @@ public class ChanRipper extends AbstractHTMLRipper {
                 String subject = doc.select(".post.op > .postinfo > .subject").first().text();
                 return getHost() + "_" + getGID(url) + "_" + subject;
             } catch (NullPointerException e) {
-                logger.warn("Failed to get thread title from " + url);
+                LOGGER.warn("Failed to get thread title from " + url);
             }
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         // Fall back on the GID
         return getHost() + "_" + getGID(url);
@@ -144,7 +144,7 @@ public class ChanRipper extends AbstractHTMLRipper {
     private boolean isURLBlacklisted(String url) {
         for (String blacklist_item : url_piece_blacklist) {
             if (url.contains(blacklist_item)) {
-                logger.debug("Skipping link that contains '"+blacklist_item+"': " + url);
+                LOGGER.debug("Skipping link that contains '"+blacklist_item+"': " + url);
                 return true;
             }
         }
@@ -185,7 +185,7 @@ public class ChanRipper extends AbstractHTMLRipper {
             }
             // Don't download the same URL twice
             if (imageURLs.contains(href)) {
-                logger.debug("Already attempted: " + href);
+                LOGGER.debug("Already attempted: " + href);
                 continue;
             }
             imageURLs.add(href);
@@ -63,7 +63,7 @@ public class CheveretoRipper extends AbstractHTMLRipper {
             return getHost() + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -122,14 +122,14 @@ public class DeviantartRipper extends AbstractHTMLRipper {
         String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
 
         if (username == null || password == null) {
-            logger.debug("No DeviantArt login provided.");
+            LOGGER.debug("No DeviantArt login provided.");
             cookies.put("agegate_state","1"); // Bypasses the age gate
         } else {
             // Attempt Login
             try {
                 cookies = loginToDeviantart();
             } catch (IOException e) {
-                logger.warn("Failed to login: ", e);
+                LOGGER.warn("Failed to login: ", e);
                 cookies.put("agegate_state","1"); // Bypasses the age gate
             }
         }
@@ -161,7 +161,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                     script = script.substring(script.indexOf("},\"src\":\"") + 9, script.indexOf("\",\"type\""));
                     return script.replace("\\/", "/");
                 } catch (StringIndexOutOfBoundsException e) {
-                    logger.debug("Unable to get json link from " + page.location());
+                    LOGGER.debug("Unable to get json link from " + page.location());
                 }
             }
         }
@@ -204,7 +204,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                 }
             }
             if (triedURLs.contains(fullSize)) {
-                logger.warn("Already tried to download " + fullSize);
+                LOGGER.warn("Already tried to download " + fullSize);
                 continue;
             }
             triedURLs.add(fullSize);
@@ -222,7 +222,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
         List<String> textURLs = new ArrayList<>();
         // Iterate over all thumbnails
         for (Element thumb : page.select("div.zones-container span.thumb")) {
-            logger.info(thumb.attr("href"));
+            LOGGER.info(thumb.attr("href"));
             if (isStopped()) {
                 break;
             }
@@ -256,7 +256,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
         if (!sleep(PAGE_SLEEP_TIME)) {
             throw new IOException("Interrupted while waiting to load next page: " + nextPage);
         }
-        logger.info("Found next page: " + nextPage);
+        LOGGER.info("Found next page: " + nextPage);
         return Http.url(nextPage)
                    .cookies(cookies)
                    .get();
@@ -351,7 +351,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             return new String[] {Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)),fullSize};
             // TODO Make this not make a newline if someone just types \n into the description.
         } catch (IOException ioe) {
-            logger.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'");
+            LOGGER.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'");
             return null;
         }
     }
@@ -379,7 +379,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             if (!els.isEmpty()) {
                 // Large image
                 fsimage = els.get(0).attr("src");
-                logger.info("Found large-scale: " + fsimage);
+                LOGGER.info("Found large-scale: " + fsimage);
                 if (fsimage.contains("//orig")) {
                     return fsimage;
                 }
@@ -389,7 +389,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             if (!els.isEmpty()) {
                 // Full-size image
                 String downloadLink = els.get(0).attr("href");
-                logger.info("Found download button link: " + downloadLink);
+                LOGGER.info("Found download button link: " + downloadLink);
                 HttpURLConnection con = (HttpURLConnection) new URL(downloadLink).openConnection();
                 con.setRequestProperty("Referer",this.url.toString());
                 String cookieString = "";
@@ -406,7 +406,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                 con.disconnect();
                 if (location.contains("//orig")) {
                     fsimage = location;
-                    logger.info("Found image download: " + location);
+                    LOGGER.info("Found image download: " + location);
                 }
             }
             if (fsimage != null) {
@@ -415,9 +415,9 @@ public class DeviantartRipper extends AbstractHTMLRipper {
             throw new IOException("No download page found");
         } catch (IOException ioe) {
             try {
-                logger.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'");
+                LOGGER.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'");
                 String lessThanFull = thumbToFull(thumb, false);
-                logger.info("Falling back to less-than-full-size image " + lessThanFull);
+                LOGGER.info("Falling back to less-than-full-size image " + lessThanFull);
                 return lessThanFull;
             } catch (Exception e) {
                 return null;
@ -70,7 +70,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
|
|||||||
return getHost() + "_" + elems.first().text();
|
return getHost() + "_" + elems.first().text();
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
// Fall back to default album naming convention
|
// Fall back to default album naming convention
|
||||||
logger.warn("Failed to get album title from " + url, e);
|
LOGGER.warn("Failed to get album title from " + url, e);
|
||||||
}
|
}
|
||||||
return super.getAlbumTitle(url);
|
return super.getAlbumTitle(url);
|
||||||
}
|
}
|
||||||
@ -103,7 +103,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
|
|||||||
int retries = 3;
|
int retries = 3;
|
||||||
while (true) {
|
while (true) {
|
||||||
sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
|
sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
|
||||||
logger.info("Retrieving " + url);
|
LOGGER.info("Retrieving " + url);
|
||||||
doc = Http.url(url)
|
doc = Http.url(url)
|
||||||
.referrer(this.url)
|
.referrer(this.url)
|
||||||
.cookies(cookies)
|
.cookies(cookies)
|
||||||
@ -112,7 +112,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
|
|||||||
if (retries == 0) {
|
if (retries == 0) {
|
||||||
throw new IOException("Hit rate limit and maximum number of retries, giving up");
|
throw new IOException("Hit rate limit and maximum number of retries, giving up");
|
||||||
}
|
}
|
||||||
logger.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining");
|
LOGGER.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining");
|
||||||
retries--;
|
retries--;
|
||||||
try {
|
try {
|
||||||
Thread.sleep(IP_BLOCK_SLEEP_TIME);
|
Thread.sleep(IP_BLOCK_SLEEP_TIME);
|
||||||
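
The two hunks above touch EHentaiRipper's rate-limit handling: the page is fetched in a loop, and a detected block triggers a fixed sleep and a bounded number of retries. A standalone sketch of that retry shape (the names and the ban-detection check are assumptions, not RipMe's exact code):

import java.io.IOException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

public class RetryFetch {
    private static final long IP_BLOCK_SLEEP_TIME = 60_000; // ms; assumed value

    static Document fetchWithRetries(String url) throws IOException, InterruptedException {
        int retries = 3;
        while (true) {
            Document doc = Jsoup.connect(url).get();
            // Hypothetical marker; the real ripper inspects the response for a ban page.
            if (!doc.text().contains("IP address will be temporarily banned")) {
                return doc;
            }
            if (retries == 0) {
                throw new IOException("Hit rate limit and maximum number of retries, giving up");
            }
            retries--;
            Thread.sleep(IP_BLOCK_SLEEP_TIME); // back off before retrying
        }
    }
}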
@@ -137,7 +137,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
         if (blackListedTags == null) {
             return null;
         }
-        logger.info("Blacklisted tags " + blackListedTags[0]);
+        LOGGER.info("Blacklisted tags " + blackListedTags[0]);
         List<String> tagsOnPage = getTags(doc);
         for (String tag : blackListedTags) {
             for (String pageTag : tagsOnPage) {
@@ -153,9 +153,9 @@ public class EHentaiRipper extends AbstractHTMLRipper {
 
     private List<String> getTags(Document doc) {
         List<String> tags = new ArrayList<>();
-        logger.info("Getting tags");
+        LOGGER.info("Getting tags");
         for (Element tag : doc.select("td > div > a")) {
-            logger.info("Found tag " + tag.text());
+            LOGGER.info("Found tag " + tag.text());
             tags.add(tag.text());
         }
         return tags;
@@ -168,7 +168,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
             albumDoc = getPageWithRetries(this.url);
         }
         this.lastURL = this.url.toExternalForm();
-        logger.info("Checking blacklist");
+        LOGGER.info("Checking blacklist");
         String blacklistedTag = checkTags(albumDoc, Utils.getConfigStringArray("ehentai.blacklist.tags"));
         if (blacklistedTag != null) {
             sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " +
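
checkTags and getTags implement the configurable tag blacklist: tags are scraped off the gallery page, and the first match against the ehentai.blacklist.tags config entry aborts the rip. A self-contained sketch of the matching step (method names mirror the diff; the comparison details are assumptions):

import java.util.Arrays;
import java.util.List;

public class TagBlacklist {
    /** Returns the first blacklisted tag found on the page, or null if none match. */
    static String checkTags(List<String> tagsOnPage, String[] blackListedTags) {
        if (blackListedTags == null) {
            return null;
        }
        for (String tag : blackListedTags) {
            for (String pageTag : tagsOnPage) {
                if (tag.trim().equalsIgnoreCase(pageTag.trim())) {
                    return tag;
                }
            }
        }
        return null;
    }

    public static void main(String[] args) {
        List<String> onPage = Arrays.asList("language:english", "full color");
        // Prints "full color": the album would be skipped.
        System.out.println(checkTags(onPage, new String[]{"guro", "full color"}));
    }
}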
@@ -187,13 +187,13 @@ public class EHentaiRipper extends AbstractHTMLRipper {
         // Find next page
         Elements hrefs = doc.select(".ptt a");
         if (hrefs.isEmpty()) {
-            logger.info("doc: " + doc.html());
+            LOGGER.info("doc: " + doc.html());
             throw new IOException("No navigation links found");
         }
         // Ensure next page is different from the current page
         String nextURL = hrefs.last().attr("href");
         if (nextURL.equals(this.lastURL)) {
-            logger.info("lastURL = nextURL : " + nextURL);
+            LOGGER.info("lastURL = nextURL : " + nextURL);
             throw new IOException("Reached last page of results");
         }
         // Sleep before loading next page
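
Next-page detection here leans on the .ptt pager: its last link points at the next page, and when that href equals the page just visited, the ripper knows it has reached the end. A minimal sketch of the same end-of-results check, separated from the ripper for clarity:

import java.io.IOException;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

public class Pagination {
    /** Returns the next page URL, or throws when the pager repeats itself. */
    static String nextPage(Document doc, String lastURL) throws IOException {
        Elements hrefs = doc.select(".ptt a");
        if (hrefs.isEmpty()) {
            throw new IOException("No navigation links found");
        }
        String nextURL = hrefs.last().attr("href");
        if (nextURL.equals(lastURL)) {
            // The pager loops back on the final page.
            throw new IOException("Reached last page of results");
        }
        return nextURL;
    }
}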
@@ -223,7 +223,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                 Thread.sleep(IMAGE_SLEEP_TIME);
             }
             catch (InterruptedException e) {
-                logger.warn("Interrupted while waiting to load next image", e);
+                LOGGER.warn("Interrupted while waiting to load next image", e);
             }
         }
 
@@ -259,13 +259,13 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                     // Attempt to find image elsewise (Issue #41)
                     images = doc.select("img#img");
                     if (images.isEmpty()) {
-                        logger.warn("Image not found at " + this.url);
+                        LOGGER.warn("Image not found at " + this.url);
                         return;
                     }
                 }
                 Element image = images.first();
                 String imgsrc = image.attr("src");
-                logger.info("Found URL " + imgsrc + " via " + images.get(0));
+                LOGGER.info("Found URL " + imgsrc + " via " + images.get(0));
                 Pattern p = Pattern.compile("^http://.*/ehg/image.php.*&n=([^&]+).*$");
                 Matcher m = p.matcher(imgsrc);
                 if (m.matches()) {
@@ -286,7 +286,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                         addURLToDownload(new URL(imgsrc), prefix);
                     }
                 } catch (IOException e) {
-                    logger.error("[!] Exception while loading/parsing " + this.url, e);
+                    LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
                 }
             }
         }
@@ -70,7 +70,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
             return getHost() + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -96,19 +96,19 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                 if (thumb.attr("href").contains("/comics/album/")) {
                     String subUrl = "https://www.8muses.com" + thumb.attr("href");
                     try {
-                        logger.info("Retrieving " + subUrl);
+                        LOGGER.info("Retrieving " + subUrl);
                         sendUpdate(STATUS.LOADING_RESOURCE, subUrl);
                         Document subPage = Http.url(subUrl).get();
                         // If the page below this one has images this line will download them
                         List<String> subalbumImages = getURLsFromPage(subPage);
-                        logger.info("Found " + subalbumImages.size() + " images in subalbum");
+                        LOGGER.info("Found " + subalbumImages.size() + " images in subalbum");
                     } catch (IOException e) {
-                        logger.warn("Error while loading subalbum " + subUrl, e);
+                        LOGGER.warn("Error while loading subalbum " + subUrl, e);
                     }
 
                 } else if (thumb.attr("href").contains("/comics/picture/")) {
-                    logger.info("This page is a album");
-                    logger.info("Ripping image");
+                    LOGGER.info("This page is a album");
+                    LOGGER.info("Ripping image");
                     if (super.isStopped()) break;
                     // Find thumbnail image source
                     String image = null;
@@ -122,7 +122,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                         imageHref = "https://www.8muses.com" + imageHref;
                     }
                     try {
-                        logger.info("Retrieving full-size image location from " + imageHref);
+                        LOGGER.info("Retrieving full-size image location from " + imageHref);
                         image = getFullSizeImage(imageHref);
                         URL imageUrl = new URL(image);
                         if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
@@ -134,7 +134,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                         x++;
 
                     } catch (IOException e) {
-                        logger.error("Failed to get full-size image from " + imageHref);
+                        LOGGER.error("Failed to get full-size image from " + imageHref);
                         continue;
                     }
                 }
@@ -152,7 +152,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
 
     private String getFullSizeImage(String imageUrl) throws IOException {
         sendUpdate(STATUS.LOADING_RESOURCE, imageUrl);
-        logger.info("Getting full sized image from " + imageUrl);
+        LOGGER.info("Getting full sized image from " + imageUrl);
         Document doc = new Http(imageUrl).get(); // Retrieve the webpage of the image URL
         String imageName = doc.select("input[id=imageName]").attr("value"); // Select the "input" element from the page
         return "https://www.8muses.com/image/fm/" + imageName;
@@ -166,14 +166,14 @@ public class EightmusesRipper extends AbstractHTMLRipper {
     }
 
     private String getSubdir(String rawHref) {
-        logger.info("Raw title: " + rawHref);
+        LOGGER.info("Raw title: " + rawHref);
         String title = rawHref;
         title = title.replaceAll("8muses - Sex and Porn Comics", "");
         title = title.replaceAll("\t\t", "");
         title = title.replaceAll("\n", "");
         title = title.replaceAll("\\| ", "");
         title = title.replace(" ", "-");
-        logger.info(title);
+        LOGGER.info(title);
         return title;
     }
 
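
getSubdir is a chain of string replacements that turns a scraped page title into a directory-safe name. A quick worked example of what that chain does (the input string is illustrative):

public class SubdirDemo {
    static String getSubdir(String rawHref) {
        String title = rawHref;
        title = title.replaceAll("8muses - Sex and Porn Comics", "");
        title = title.replaceAll("\t\t", "");  // strip tab runs left by the page layout
        title = title.replaceAll("\n", "");    // strip embedded newlines
        title = title.replaceAll("\\| ", "");  // drop the "| " separator
        title = title.replace(" ", "-");       // spaces become hyphens
        return title;
    }

    public static void main(String[] args) {
        // Prints "Some-Comic-": a slug usable as a subdirectory name.
        System.out.println(getSubdir("Some Comic | 8muses - Sex and Porn Comics"));
    }
}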
@@ -103,7 +103,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
             return getHost() + "_" + getGID(url) + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -139,7 +139,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
             try {
                 video_page = Http.url("eroshae.com" + link.attr("href")).get();
             } catch (IOException e) {
-                logger.warn("Failed to log link in Jsoup");
+                LOGGER.warn("Failed to log link in Jsoup");
                 video_page = null;
                 e.printStackTrace();
             }
@@ -52,7 +52,7 @@ public class EromeRipper extends AbstractHTMLRipper {
             return getHost() + "_" + getGID(url) + "_" + title.trim();
         } catch (IOException e) {
             // Fall back to default album naming convention
-            logger.info("Unable to find title at " + url);
+            LOGGER.info("Unable to find title at " + url);
         }
         return super.getAlbumTitle(url);
     }
@@ -153,7 +153,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
 
     /** Convert username to UserID. */
     private String getUserID(String username) throws IOException {
-        logger.info("Fetching user ID for " + username);
+        LOGGER.info("Fetching user ID for " + username);
         JSONObject json = new Http("https://api.500px.com/v1/" +
                 "users/show" +
                 "?username=" + username +
@@ -165,7 +165,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
     @Override
     public JSONObject getFirstPage() throws IOException {
         URL apiURL = new URL(baseURL + "&consumer_key=" + CONSUMER_KEY);
-        logger.debug("apiURL: " + apiURL);
+        LOGGER.debug("apiURL: " + apiURL);
         JSONObject json = Http.url(apiURL).getJSON();
 
         if (baseURL.contains("/galleries?")) {
@@ -185,7 +185,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                         + "?rpp=100"
                         + "&image_size=5"
                         + "&consumer_key=" + CONSUMER_KEY;
-                logger.info("Loading " + blogURL);
+                LOGGER.info("Loading " + blogURL);
                 sendUpdate(STATUS.LOADING_RESOURCE, "Gallery ID " + galleryID + " for userID " + userID);
                 JSONObject thisJSON = Http.url(blogURL).getJSON();
                 JSONArray thisPhotos = thisJSON.getJSONArray("photos");
@@ -216,7 +216,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                         + "&rpp=100"
                         + "&image_size=5"
                         + "&consumer_key=" + CONSUMER_KEY;
-                logger.info("Loading " + blogURL);
+                LOGGER.info("Loading " + blogURL);
                 sendUpdate(STATUS.LOADING_RESOURCE, "Story ID " + blogid + " for user " + username);
                 JSONObject thisJSON = Http.url(blogURL).getJSON();
                 JSONArray thisPhotos = thisJSON.getJSONArray("photos");
@@ -268,20 +268,20 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
             Document doc;
             Elements images = new Elements();
             try {
-                logger.debug("Loading " + rawUrl);
+                LOGGER.debug("Loading " + rawUrl);
                 super.retrievingSource(rawUrl);
                 doc = Http.url(rawUrl).get();
                 images = doc.select("div#preload img");
             }
             catch (IOException e) {
-                logger.error("Error fetching full-size image from " + rawUrl, e);
+                LOGGER.error("Error fetching full-size image from " + rawUrl, e);
             }
             if (!images.isEmpty()) {
                 imageURL = images.first().attr("src");
-                logger.debug("Found full-size non-watermarked image: " + imageURL);
+                LOGGER.debug("Found full-size non-watermarked image: " + imageURL);
             }
             else {
-                logger.debug("Falling back to image_url from API response");
+                LOGGER.debug("Falling back to image_url from API response");
                 imageURL = photo.getString("image_url");
                 imageURL = imageURL.replaceAll("/4\\.", "/5.");
                 // See if there's larger images
@@ -289,14 +289,14 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                     String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + ".");
                     sleep(10);
                     if (urlExists(fsURL)) {
-                        logger.info("Found larger image at " + fsURL);
+                        LOGGER.info("Found larger image at " + fsURL);
                         imageURL = fsURL;
                         break;
                     }
                 }
             }
             if (imageURL == null) {
-                logger.error("Failed to find image for photo " + photo.toString());
+                LOGGER.error("Failed to find image for photo " + photo.toString());
             }
             else {
                 imageURLs.add(imageURL);
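
The 500px fallback path rewrites the API's image_url to request progressively larger size codes and keeps the first candidate that actually resolves. A minimal sketch of that probe loop (the size codes and the HEAD-request existence check are assumptions based on the diff, not RipMe's exact helpers):

import java.net.HttpURLConnection;
import java.net.URL;

public class SizeProbe {
    // Checks whether a candidate URL resolves, as RipMe's urlExists helper presumably does.
    static boolean urlExists(String url) {
        try {
            HttpURLConnection con = (HttpURLConnection) new URL(url).openConnection();
            con.setRequestMethod("HEAD");
            return con.getResponseCode() == 200;
        } catch (Exception e) {
            return false;
        }
    }

    static String upgrade(String imageURL) throws InterruptedException {
        for (String imageSize : new String[]{"2048", "1080"}) { // assumed size codes
            String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + ".");
            Thread.sleep(10); // be polite between probes
            if (urlExists(fsURL)) {
                return fsURL; // first hit wins
            }
        }
        return imageURL; // nothing larger found
    }
}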
@@ -251,7 +251,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
                 Document doc = getLargestImagePageDocument(this.url);
                 Elements fullsizeImages = doc.select("div#allsizes-photo img");
                 if (fullsizeImages.isEmpty()) {
-                    logger.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
+                    LOGGER.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
                 }
                 else {
                     String prefix = "";
@@ -263,7 +263,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
                     }
                 }
             } catch (IOException e) {
-                logger.error("[!] Exception while loading/parsing " + this.url, e);
+                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
             }
         }
 
@@ -81,7 +81,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
 
     private String getImageFromPost(String url) {
         try {
-            logger.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content"));
+            LOGGER.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content"));
             return Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content");
         } catch (IOException e) {
             return "";
@@ -103,7 +103,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
         Elements urlElements = page.select("figure.t-image > b > u > a");
         for (Element e : urlElements) {
             urls.add(urlBase + e.select("a").first().attr("href"));
-            logger.debug("Desc2 " + urlBase + e.select("a").first().attr("href"));
+            LOGGER.debug("Desc2 " + urlBase + e.select("a").first().attr("href"));
         }
         return urls;
     }
@@ -122,21 +122,21 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
             // Try to find the description
             Elements els = resp.parse().select("td[class=alt1][width=\"70%\"]");
             if (els.isEmpty()) {
-                logger.debug("No description at " + page);
+                LOGGER.debug("No description at " + page);
                 throw new IOException("No description found");
             }
-            logger.debug("Description found!");
+            LOGGER.debug("Description found!");
             Document documentz = resp.parse();
             Element ele = documentz.select("td[class=alt1][width=\"70%\"]").get(0); // This is where the description is.
             // Would break completely if FurAffinity changed site layout.
             documentz.outputSettings(new Document.OutputSettings().prettyPrint(false));
             ele.select("br").append("\\n");
             ele.select("p").prepend("\\n\\n");
-            logger.debug("Returning description at " + page);
+            LOGGER.debug("Returning description at " + page);
             String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false));
             return documentz.select("meta[property=og:title]").attr("content") + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name.
         } catch (IOException ioe) {
-            logger.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
+            LOGGER.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
             return null;
         }
     }
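
The description hunk above uses a common Jsoup trick: append literal \n markers to <br> and <p> nodes, then strip every tag with Jsoup.clean so the markers survive as line breaks in the plain-text output. A standalone sketch of the technique, separated from the ripper:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.safety.Whitelist;

public class HtmlToText {
    static String toPlainText(String html) {
        Document doc = Jsoup.parse(html);
        doc.outputSettings(new Document.OutputSettings().prettyPrint(false));
        Element body = doc.body();
        body.select("br").append("\\n");     // mark line breaks
        body.select("p").prepend("\\n\\n");  // mark paragraph breaks
        // Turn the markers into real newlines, then strip all remaining tags.
        return Jsoup.clean(
                body.html().replaceAll("\\\\n", System.lineSeparator()),
                "", Whitelist.none(),
                new Document.OutputSettings().prettyPrint(false));
    }

    public static void main(String[] args) {
        System.out.println(toPlainText("<p>first</p><p>second<br>line</p>"));
    }
}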
@@ -171,12 +171,12 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
             out.write(text.getBytes());
             out.close();
         } catch (IOException e) {
-            logger.error("[!] Error creating save file path for description '" + url + "':", e);
+            LOGGER.error("[!] Error creating save file path for description '" + url + "':", e);
             return false;
         }
-        logger.debug("Downloading " + url + "'s description to " + saveFileAs);
+        LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs);
         if (!saveFileAs.getParentFile().exists()) {
-            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
             saveFileAs.getParentFile().mkdirs();
         }
         return true;
@@ -67,7 +67,7 @@ public class FuskatorRipper extends AbstractHTMLRipper {
         try {
             baseUrl = URLDecoder.decode(baseUrl, "UTF-8");
         } catch (UnsupportedEncodingException e) {
-            logger.warn("Error while decoding " + baseUrl, e);
+            LOGGER.warn("Error while decoding " + baseUrl, e);
         }
         if (baseUrl.startsWith("//")) {
             baseUrl = "http:" + baseUrl;
@@ -40,7 +40,7 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper {
             return getHost() + "_" + elems.first().text();
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
@@ -56,7 +56,7 @@ public class HbrowseRipper extends AbstractHTMLRipper {
             return getHost() + "_" + title + "_" + getGID(url);
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
@@ -38,7 +38,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
 
     @Override
     public boolean pageContainsAlbums(URL url) {
-        logger.info("Page contains albums");
+        LOGGER.info("Page contains albums");
         Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
         Matcher mat = pat.matcher(url.toExternalForm());
         if (mat.matches()) {
@@ -95,7 +95,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
             return getHost() + "_" + getGID(url);
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
@@ -98,7 +98,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
             cookies.putAll(resp.cookies());
         }
         else {
-            logger.info("unable to find csrf_token and set filter");
+            LOGGER.info("unable to find csrf_token and set filter");
         }
 
         resp = Http.url(url)
@@ -139,19 +139,19 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
             }
             Matcher imgMatcher = imgRegex.matcher(thumb.attr("href"));
             if (!imgMatcher.matches()) {
-                logger.info("Couldn't find user & image ID in " + thumb.attr("href"));
+                LOGGER.info("Couldn't find user & image ID in " + thumb.attr("href"));
                 continue;
             }
             Document imagePage;
             try {
 
-                logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
+                LOGGER.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
                 imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
             }
 
             catch (IOException e) {
-                logger.debug(e.getMessage());
-                logger.debug("Warning: imagePage is null!");
+                LOGGER.debug(e.getMessage());
+                LOGGER.debug("Warning: imagePage is null!");
                 imagePage = null;
             }
             // This is here for when the image is resized to a thumbnail because ripme doesn't report a screensize
@@ -9,9 +9,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.json.JSONArray;
-import org.json.JSONObject;
 import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;
 
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
@@ -57,7 +55,7 @@ public class HitomiRipper extends AbstractHTMLRipper {
     public List<String> getURLsFromPage(Document doc) {
         List<String> result = new ArrayList<>();
         String json = doc.text().replaceAll("var galleryinfo =", "");
-        logger.info(json);
+        LOGGER.info(json);
         JSONArray json_data = new JSONArray(json);
         for (int i = 0; i < json_data.length(); i++) {
             result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));
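
HitomiRipper's "page" is really a JavaScript file: stripping the var galleryinfo = prefix leaves a JSON array of file descriptors that org.json can parse directly. A minimal sketch of that extraction (the sample payload and gallery ID are invented):

import org.json.JSONArray;

public class GalleryInfoDemo {
    public static void main(String[] args) {
        String galleryId = "12345"; // hypothetical gallery ID
        String page = "var galleryinfo = [{\"name\":\"01.jpg\"},{\"name\":\"02.jpg\"}]";
        // Drop the JS assignment so only the JSON array remains.
        String json = page.replaceAll("var galleryinfo =", "");
        JSONArray files = new JSONArray(json);
        for (int i = 0; i < files.length(); i++) {
            System.out.println("https://0a.hitomi.la/galleries/" + galleryId + "/"
                    + files.getJSONObject(i).getString("name"));
        }
    }
}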
@@ -14,8 +14,6 @@ import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ripper.AbstractHTMLRipper;
 import com.rarchives.ripme.utils.Http;
 
-import javax.print.Doc;
-
 public class HypnohubRipper extends AbstractHTMLRipper {
 
     public HypnohubRipper(URL url) throws IOException {
@@ -55,14 +53,14 @@ public class HypnohubRipper extends AbstractHTMLRipper {
     }
 
     private String ripPost(String url) throws IOException {
-        logger.info(url);
+        LOGGER.info(url);
         Document doc = Http.url(url).get();
         return "https:" + doc.select("img.image").attr("src");
 
     }
 
     private String ripPost(Document doc) {
-        logger.info(url);
+        LOGGER.info(url);
         return "https:" + doc.select("img.image").attr("src");
 
     }
@@ -99,16 +99,16 @@ public class ImagebamRipper extends AbstractHTMLRipper {
             // Attempt to use album title as GID
             Elements elems = getFirstPage().select("legend");
             String title = elems.first().text();
-            logger.info("Title text: '" + title + "'");
+            LOGGER.info("Title text: '" + title + "'");
             Pattern p = Pattern.compile("^(.*)\\s\\d* image.*$");
             Matcher m = p.matcher(title);
             if (m.matches()) {
                 return getHost() + "_" + getGID(url) + " (" + m.group(1).trim() + ")";
             }
-            logger.info("Doesn't match " + p.pattern());
+            LOGGER.info("Doesn't match " + p.pattern());
         } catch (Exception e) {
             // Fall back to default album naming convention
-            logger.warn("Failed to get album title from " + url, e);
+            LOGGER.warn("Failed to get album title from " + url, e);
         }
         return super.getAlbumTitle(url);
     }
@@ -148,14 +148,14 @@ public class ImagebamRipper extends AbstractHTMLRipper {
                 //the direct link to the image seems to always be linked in the <meta> part of the html.
                 if (metaTag.attr("property").equals("og:image")) {
                     imgsrc = metaTag.attr("content");
-                    logger.info("Found URL " + imgsrc);
+                    LOGGER.info("Found URL " + imgsrc);
                     break;//only one (useful) image possible for an "image page".
                 }
             }
 
             //for debug, or something goes wrong.
             if (imgsrc.isEmpty()) {
-                logger.warn("Image not found at " + this.url);
+                LOGGER.warn("Image not found at " + this.url);
                 return;
             }
 
@@ -167,7 +167,7 @@ public class ImagebamRipper extends AbstractHTMLRipper {
 
             addURLToDownload(new URL(imgsrc), prefix);
         } catch (IOException e) {
-            logger.error("[!] Exception while loading/parsing " + this.url, e);
+            LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
         }
     }
 }
@@ -43,7 +43,7 @@ public class ImagefapRipper extends AbstractHTMLRipper {
             newURL += "p";
         }
         newURL += "gid=" + gid + "&view=2";
-        logger.debug("Changed URL from " + url + " to " + newURL);
+        LOGGER.debug("Changed URL from " + url + " to " + newURL);
         return new URL(newURL);
     }
 
@@ -102,7 +102,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
             // Find image
             Elements images = doc.select("a > img");
             if (images.isEmpty()) {
-                logger.warn("Image not found at " + this.url);
+                LOGGER.warn("Image not found at " + this.url);
                 return;
             }
             Element image = images.first();
@@ -115,7 +115,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
             }
             addURLToDownload(new URL(imgsrc), prefix);
         } catch (IOException e) {
-            logger.error("[!] Exception while loading/parsing " + this.url, e);
+            LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
         }
     }
 }
@@ -108,24 +108,24 @@ public class ImgurRipper extends AlbumRipper {
             String title = null;
             final String defaultTitle1 = "Imgur: The most awesome images on the Internet";
             final String defaultTitle2 = "Imgur: The magic of the Internet";
-            logger.info("Trying to get album title");
+            LOGGER.info("Trying to get album title");
             elems = albumDoc.select("meta[property=og:title]");
             if (elems != null) {
                 title = elems.attr("content");
-                logger.debug("Title is " + title);
+                LOGGER.debug("Title is " + title);
             }
             // This is here encase the album is unnamed, to prevent
             // Imgur: The most awesome images on the Internet from being added onto the album name
             if (title.contains(defaultTitle1) || title.contains(defaultTitle2)) {
-                logger.debug("Album is untitled or imgur is returning the default title");
+                LOGGER.debug("Album is untitled or imgur is returning the default title");
                 // We set the title to "" here because if it's found in the next few attempts it will be changed
                 // but if it's nto found there will be no reason to set it later
                 title = "";
-                logger.debug("Trying to use title tag to get title");
+                LOGGER.debug("Trying to use title tag to get title");
                 elems = albumDoc.select("title");
                 if (elems != null) {
                     if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) {
-                        logger.debug("Was unable to get album title or album was untitled");
+                        LOGGER.debug("Was unable to get album title or album was untitled");
                     }
                     else {
                         title = elems.text();
@@ -159,29 +159,29 @@ public class ImgurRipper extends AlbumRipper {
             case ALBUM:
                 // Fall-through
             case USER_ALBUM:
-                logger.info("Album type is USER_ALBUM");
+                LOGGER.info("Album type is USER_ALBUM");
                 // Don't call getAlbumTitle(this.url) with this
                 // as it seems to cause the album to be downloaded to a subdir.
                 ripAlbum(this.url);
                 break;
             case SERIES_OF_IMAGES:
-                logger.info("Album type is SERIES_OF_IMAGES");
+                LOGGER.info("Album type is SERIES_OF_IMAGES");
                 ripAlbum(this.url);
                 break;
             case SINGLE_IMAGE:
-                logger.info("Album type is SINGLE_IMAGE");
+                LOGGER.info("Album type is SINGLE_IMAGE");
                 ripSingleImage(this.url);
                 break;
             case USER:
-                logger.info("Album type is USER");
+                LOGGER.info("Album type is USER");
                 ripUserAccount(url);
                 break;
             case SUBREDDIT:
-                logger.info("Album type is SUBREDDIT");
+                LOGGER.info("Album type is SUBREDDIT");
                 ripSubreddit(url);
                 break;
             case USER_IMAGES:
-                logger.info("Album type is USER_IMAGES");
+                LOGGER.info("Album type is USER_IMAGES");
                 ripUserImages(url);
                 break;
         }
@@ -241,7 +241,7 @@ public class ImgurRipper extends AlbumRipper {
             String[] imageIds = m.group(1).split(",");
             for (String imageId : imageIds) {
                 // TODO: Fetch image with ID imageId
-                logger.debug("Fetching image info for ID " + imageId);
+                LOGGER.debug("Fetching image info for ID " + imageId);
                 try {
                     JSONObject json = Http.url("https://api.imgur.com/2/image/" + imageId + ".json").getJSON();
                     if (!json.has("image")) {
@@ -259,7 +259,7 @@ public class ImgurRipper extends AlbumRipper {
                     ImgurImage theImage = new ImgurImage(new URL(original));
                     album.addImage(theImage);
                 } catch (Exception e) {
-                    logger.error("Got exception while fetching imgur ID " + imageId, e);
+                    LOGGER.error("Got exception while fetching imgur ID " + imageId, e);
                 }
             }
         }
@@ -271,7 +271,7 @@ public class ImgurRipper extends AlbumRipper {
         if (!strUrl.contains(",")) {
             strUrl += "/all";
         }
-        logger.info(" Retrieving " + strUrl);
+        LOGGER.info(" Retrieving " + strUrl);
         Document doc = getDocument(strUrl);
         // Try to use embedded JSON to retrieve images
         Matcher m = getEmbeddedJsonMatcher(doc);
@@ -283,7 +283,7 @@ public class ImgurRipper extends AlbumRipper {
                         .getJSONArray("images");
                 return createImgurAlbumFromJsonArray(url, jsonImages);
             } catch (JSONException e) {
-                logger.debug("Error while parsing JSON at " + url + ", continuing", e);
+                LOGGER.debug("Error while parsing JSON at " + url + ", continuing", e);
             }
         }
 
@@ -291,10 +291,10 @@ public class ImgurRipper extends AlbumRipper {
         // http://i.rarchives.com/search.cgi?cache=http://imgur.com/a/albumID
         // At the least, get the thumbnails.
 
-        logger.info("[!] Falling back to /noscript method");
+        LOGGER.info("[!] Falling back to /noscript method");
 
         String newUrl = url.toExternalForm() + "/noscript";
-        logger.info(" Retrieving " + newUrl);
+        LOGGER.info(" Retrieving " + newUrl);
         doc = Jsoup.connect(newUrl)
                 .userAgent(USER_AGENT)
                 .get();
@@ -311,7 +311,7 @@ public class ImgurRipper extends AlbumRipper {
                 image = "http:" + thumb.select("img").attr("src");
             } else {
                 // Unable to find image in this div
-                logger.error("[!] Unable to find image in div: " + thumb.toString());
+                LOGGER.error("[!] Unable to find image in div: " + thumb.toString());
                 continue;
             }
             if (image.endsWith(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
@@ -368,7 +368,7 @@ public class ImgurRipper extends AlbumRipper {
      * @throws IOException
      */
     private void ripUserAccount(URL url) throws IOException {
-        logger.info("Retrieving " + url);
+        LOGGER.info("Retrieving " + url);
         sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
         Document doc = Http.url(url).get();
         for (Element album : doc.select("div.cover a")) {
@@ -383,7 +383,7 @@ public class ImgurRipper extends AlbumRipper {
                 ripAlbum(albumURL, albumID);
                 Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000);
             } catch (Exception e) {
-                logger.error("Error while ripping album: " + e.getMessage(), e);
+                LOGGER.error("Error while ripping album: " + e.getMessage(), e);
             }
         }
     }
@@ -420,7 +420,7 @@ public class ImgurRipper extends AlbumRipper {
                 }
                 Thread.sleep(1000);
             } catch (Exception e) {
-                logger.error("Error while ripping user images: " + e.getMessage(), e);
+                LOGGER.error("Error while ripping user images: " + e.getMessage(), e);
                 break;
             }
         }
@@ -435,7 +435,7 @@ public class ImgurRipper extends AlbumRipper {
                 pageURL += "/";
             }
             pageURL += "page/" + page + "/miss?scrolled";
-            logger.info(" Retrieving " + pageURL);
+            LOGGER.info(" Retrieving " + pageURL);
             Document doc = Http.url(pageURL).get();
             Elements imgs = doc.select(".post img");
             for (Element img : imgs) {
@@ -456,7 +456,7 @@ public class ImgurRipper extends AlbumRipper {
             try {
                 Thread.sleep(1000);
             } catch (InterruptedException e) {
-                logger.error("Interrupted while waiting to load next album: ", e);
+                LOGGER.error("Interrupted while waiting to load next album: ", e);
                 break;
             }
         }
@@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.AbstractJSONRipper;
 import com.rarchives.ripme.utils.Http;
 
 import org.jsoup.Connection;
-import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import com.rarchives.ripme.ui.RipStatusMessage;
@@ -67,7 +66,7 @@ public class InstagramRipper extends AbstractJSONRipper {
     @Override
     public URL sanitizeURL(URL url) throws MalformedURLException {
         URL san_url = new URL(url.toExternalForm().replaceAll("\\?hl=\\S*", ""));
-        logger.info("sanitized URL is " + san_url.toExternalForm());
+        LOGGER.info("sanitized URL is " + san_url.toExternalForm());
         return san_url;
     }
 
@@ -184,7 +183,7 @@ public class InstagramRipper extends AbstractJSONRipper {
     @Override
     public JSONObject getFirstPage() throws IOException {
         Connection.Response resp = Http.url(url).response();
-        logger.info(resp.cookies());
+        LOGGER.info(resp.cookies());
         csrftoken = resp.cookie("csrftoken");
         Document p = resp.parse();
         // Get the query hash so we can download the next page
@@ -197,7 +196,7 @@ public class InstagramRipper extends AbstractJSONRipper {
             Document doc = Http.url("https://www.instagram.com/p/" + videoID).get();
             return doc.select("meta[property=og:video]").attr("content");
         } catch (IOException e) {
-            logger.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID);
+            LOGGER.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID);
         }
         return "";
     }
@@ -279,9 +278,9 @@ public class InstagramRipper extends AbstractJSONRipper {
                     addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode"));
                 }
             } catch (MalformedURLException e) {
-                logger.error("Unable to download slide show, URL was malformed");
+                LOGGER.error("Unable to download slide show, URL was malformed");
             } catch (IOException e) {
-                logger.error("Unable to download slide show");
+                LOGGER.error("Unable to download slide show");
             }
         }
     }
@@ -312,7 +311,7 @@ public class InstagramRipper extends AbstractJSONRipper {
             }
 
         } else { // We're ripping from a single page
-            logger.info("Ripping from single page");
+            LOGGER.info("Ripping from single page");
             imageURLs = getPostsFromSinglePage(json);
         }
 
@@ -321,7 +320,7 @@ public class InstagramRipper extends AbstractJSONRipper {
 
     private String getIGGis(String variables) {
         String stringToMD5 = rhx_gis + ":" + variables;
-        logger.debug("String to md5 is \"" + stringToMD5 + "\"");
+        LOGGER.debug("String to md5 is \"" + stringToMD5 + "\"");
         try {
             byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
 
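
getIGGis builds Instagram's X-Instagram-GIS request signature: the rhx_gis token scraped from the page is joined with the serialized query variables and hashed with MD5. A standalone sketch of that computation (the token and variables below are made up):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class IgGis {
    static String getIGGis(String rhxGis, String variables) throws NoSuchAlgorithmException {
        String stringToMD5 = rhxGis + ":" + variables;
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest(stringToMD5.getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b)); // lower-case hex, as the header expects
        }
        return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // Hypothetical token and GraphQL variables.
        System.out.println(getIGGis("deadbeef", "{\"id\":\"1\",\"first\":50}"));
    }
}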
@@ -355,7 +354,7 @@ public class InstagramRipper extends AbstractJSONRipper {
             toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
                     "&variables=" + vars, ig_gis);
             // Sleep for a while to avoid a ban
-            logger.info(toreturn);
+            LOGGER.info(toreturn);
             if (!pageHasImages(toreturn)) {
                 throw new IOException("No more pages");
             }
@@ -371,7 +370,7 @@ public class InstagramRipper extends AbstractJSONRipper {
             sleep(2500);
             String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
             String ig_gis = getIGGis(vars);
-            logger.info(ig_gis);
+            LOGGER.info(ig_gis);
 
             toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis);
             if (!pageHasImages(toreturn)) {
@@ -419,11 +418,11 @@ public class InstagramRipper extends AbstractJSONRipper {
             return new JSONObject(sb.toString());
 
         } catch (MalformedURLException e) {
-            logger.info("Unable to get query_hash, " + url + " is a malformed URL");
+            LOGGER.info("Unable to get query_hash, " + url + " is a malformed URL");
             return null;
         } catch (IOException e) {
-            logger.info("Unable to get query_hash");
-            logger.info(e.getMessage());
+            LOGGER.info("Unable to get query_hash");
+            LOGGER.info(e.getMessage());
             return null;
         }
     }
@@ -444,11 +443,11 @@ public class InstagramRipper extends AbstractJSONRipper {
             in.close();
 
         } catch (MalformedURLException e) {
-            logger.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
+            LOGGER.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
             return null;
         } catch (IOException e) {
-            logger.info("Unable to get query_hash");
-            logger.info(e.getMessage());
+            LOGGER.info("Unable to get query_hash");
+            LOGGER.info(e.getMessage());
             return null;
         }
         if (!rippingTag) {
@@ -467,6 +466,11 @@ public class InstagramRipper extends AbstractJSONRipper {
             if (m.find()) {
                 return m.group(1);
             }
+            jsP = Pattern.compile("o.pagination},queryId:.([a-zA-Z0-9]+).");
+            m = jsP.matcher(sb.toString());
+            if (m.find()) {
+                return m.group(1);
+            }
 
         } else {
             Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
@@ -475,7 +479,7 @@ public class InstagramRipper extends AbstractJSONRipper {
                 return m.group(1);
             }
         }
-        logger.error("Could not find query_hash on " + jsFileURL);
+        LOGGER.error("Could not find query_hash on " + jsFileURL);
         return null;
 
     }
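
The only behavioral change in this file is the @@ -467 hunk: when the first regex fails to locate the profile query_hash in Instagram's JS bundle, a second, looser pattern ("o.pagination},queryId:...") is tried before giving up. A minimal sketch of that two-pattern scan (the primary pattern and the sample bundle text are assumptions; only the fallback pattern comes from the diff):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class QueryHashScan {
    static String findQueryHash(String jsBundle) {
        // Primary pattern first, then the looser fallback added in this commit.
        Pattern[] patterns = {
                Pattern.compile("profilePosts.byUserId.get.*?queryId:.([a-zA-Z0-9]+)."), // assumed primary
                Pattern.compile("o.pagination},queryId:.([a-zA-Z0-9]+)."),               // fallback from the diff
        };
        for (Pattern p : patterns) {
            Matcher m = p.matcher(jsBundle);
            if (m.find()) {
                return m.group(1);
            }
        }
        return null; // caller logs "Could not find query_hash"
    }

    public static void main(String[] args) {
        // Prints the captured hash from a fabricated bundle snippet.
        System.out.println(findQueryHash("...o.pagination},queryId:\"42f3a15a3fc6631faeb7a3b1a1d5a52e\"..."));
    }
}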
@@ -54,16 +54,16 @@ public class JagodibujaRipper extends AbstractHTMLRipper {
                 sleep(500);
                 Document comicPage = Http.url(comicPageUrl.attr("href")).get();
                 Element elem = comicPage.select("span.full-size-link > a").first();
-                logger.info("Got link " + elem.attr("href"));
+                LOGGER.info("Got link " + elem.attr("href"));
                 try {
                     addURLToDownload(new URL(elem.attr("href")), "");
                 } catch (MalformedURLException e) {
-                    logger.warn("Malformed URL");
+                    LOGGER.warn("Malformed URL");
                     e.printStackTrace();
                 }
                 result.add(elem.attr("href"));
             } catch (IOException e) {
-                logger.info("Error loading " + comicPageUrl);
+                LOGGER.info("Error loading " + comicPageUrl);
             }
         }
         return result;
@@ -36,7 +36,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
         // "url" is an instance field of the superclass
         Document page = Http.url(url).get();
         URL firstUrl = new URL("https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href"));
-        logger.info("First page is " + "https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href"));
+        LOGGER.info("First page is " + "https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href"));
         return Http.url(firstUrl).get();
     }
 
|
@ -15,8 +15,6 @@ import org.jsoup.nodes.Element;
|
|||||||
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
|
||||||
import com.rarchives.ripme.utils.Http;
|
import com.rarchives.ripme.utils.Http;
|
||||||
|
|
||||||
import javax.print.Doc;
|
|
||||||
|
|
||||||
public class ManganeloRipper extends AbstractHTMLRipper {
|
public class ManganeloRipper extends AbstractHTMLRipper {
|
||||||
|
|
||||||
public ManganeloRipper(URL url) throws IOException {
|
public ManganeloRipper(URL url) throws IOException {
|
||||||
@ -67,7 +65,7 @@ public class ManganeloRipper extends AbstractHTMLRipper {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private List<String> getURLsFromChap(String url) {
|
private List<String> getURLsFromChap(String url) {
|
||||||
logger.debug("Getting urls from " + url);
|
LOGGER.debug("Getting urls from " + url);
|
||||||
List<String> result = new ArrayList<>();
|
List<String> result = new ArrayList<>();
|
||||||
try {
|
try {
|
||||||
Document doc = Http.url(url).get();
|
Document doc = Http.url(url).get();
|
||||||
@ -82,7 +80,7 @@ public class ManganeloRipper extends AbstractHTMLRipper {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private List<String> getURLsFromChap(Document doc) {
|
private List<String> getURLsFromChap(Document doc) {
|
||||||
logger.debug("Getting urls from " + url);
|
LOGGER.debug("Getting urls from " + url);
|
||||||
List<String> result = new ArrayList<>();
|
List<String> result = new ArrayList<>();
|
||||||
for (Element el : doc.select("img.img_content")) {
|
for (Element el : doc.select("img.img_content")) {
|
||||||
result.add(el.attr("src"));
|
result.add(el.attr("src"));
|
||||||
|
@ -75,7 +75,7 @@ public class MotherlessRipper extends AlbumRipper {
|
|||||||
if (isStopped()) {
|
if (isStopped()) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
logger.info("Retrieving " + nextURL);
|
LOGGER.info("Retrieving " + nextURL);
|
||||||
sendUpdate(STATUS.LOADING_RESOURCE, nextURL);
|
sendUpdate(STATUS.LOADING_RESOURCE, nextURL);
|
||||||
Document doc = Http.url(nextURL)
|
Document doc = Http.url(nextURL)
|
||||||
.referrer("http://motherless.com")
|
.referrer("http://motherless.com")
|
||||||
@ -152,10 +152,10 @@ public class MotherlessRipper extends AlbumRipper {
|
|||||||
}
|
}
|
||||||
addURLToDownload(new URL(file), prefix);
|
addURLToDownload(new URL(file), prefix);
|
||||||
} else {
|
} else {
|
||||||
logger.warn("[!] could not find '__fileurl' at " + url);
|
LOGGER.warn("[!] could not find '__fileurl' at " + url);
|
||||||
}
|
}
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
logger.error("[!] Exception while loading/parsing " + this.url, e);
|
LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -109,7 +109,7 @@ public class NatalieMuRipper extends AbstractHTMLRipper {
|
|||||||
imgUrl = imgUrl.replace("list_thumb_inbox","xlarge");
|
imgUrl = imgUrl.replace("list_thumb_inbox","xlarge");
|
||||||
// Don't download the same URL twice
|
// Don't download the same URL twice
|
||||||
if (imageURLs.contains(imgUrl)) {
|
if (imageURLs.contains(imgUrl)) {
|
||||||
logger.debug("Already attempted: " + imgUrl);
|
LOGGER.debug("Already attempted: " + imgUrl);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
imageURLs.add(imgUrl);
|
imageURLs.add(imgUrl);
|
||||||
@@ -43,7 +43,7 @@ public class NewsfilterRipper extends AlbumRipper {
 public void rip() throws IOException {
 String gid = getGID(this.url);
 String theurl = "http://newsfilter.org/gallery/" + gid;
-logger.info("Loading " + theurl);
+LOGGER.info("Loading " + theurl);
 
 Connection.Response resp = Jsoup.connect(theurl)
 .timeout(5000)
@@ -86,7 +86,7 @@ public class NfsfwRipper extends AlbumRipper {
 String nextURL = nextAlbum.first;
 String nextSubalbum = nextAlbum.second;
 sendUpdate(STATUS.LOADING_RESOURCE, nextURL);
-logger.info(" Retrieving " + nextURL);
+LOGGER.info(" Retrieving " + nextURL);
 if (albumDoc == null) {
 albumDoc = Http.url(nextURL).get();
 }
@@ -116,7 +116,7 @@ public class NfsfwRipper extends AlbumRipper {
 break;
 }
 } catch (MalformedURLException mue) {
-logger.warn("Invalid URL: " + imagePage);
+LOGGER.warn("Invalid URL: " + imagePage);
 }
 }
 if (isThisATest()) {
@@ -133,7 +133,7 @@ public class NfsfwRipper extends AlbumRipper {
 try {
 Thread.sleep(1000);
 } catch (InterruptedException e) {
-logger.error("Interrupted while waiting to load next page", e);
+LOGGER.error("Interrupted while waiting to load next page", e);
 throw new IOException(e);
 }
 }
@@ -168,7 +168,7 @@ public class NfsfwRipper extends AlbumRipper {
 .get();
 Elements images = doc.select(".gbBlock img");
 if (images.isEmpty()) {
-logger.error("Failed to find image at " + this.url);
+LOGGER.error("Failed to find image at " + this.url);
 return;
 }
 String file = images.first().attr("src");
@@ -181,7 +181,7 @@ public class NfsfwRipper extends AlbumRipper {
 }
 addURLToDownload(new URL(file), prefix, this.subdir);
 } catch (IOException e) {
-logger.error("[!] Exception while loading/parsing " + this.url, e);
+LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
 }
 }
 }
@@ -9,7 +9,6 @@ import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
@@ -102,7 +101,7 @@ public class NhentaiRipper extends AbstractHTMLRipper {
 if (blackListedTags == null) {
 return null;
 }
-logger.info("Blacklisted tags " + blackListedTags[0]);
+LOGGER.info("Blacklisted tags " + blackListedTags[0]);
 List<String> tagsOnPage = getTags(doc);
 for (String tag : blackListedTags) {
 for (String pageTag : tagsOnPage) {
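
The nested loops at the end of the NhentaiRipper hunk compare each configured blacklist entry against the tags scraped from the album page. A minimal sketch of that check as a standalone helper; the loop body falls outside this hunk, so the case-insensitive comparison shown here is an assumption:

    import java.util.Arrays;
    import java.util.List;

    public class BlacklistSketch {
        // Returns the first blacklisted tag found on the page, or null if none match.
        static String findBlacklistedTag(String[] blackListedTags, List<String> tagsOnPage) {
            if (blackListedTags == null) {
                return null;
            }
            for (String tag : blackListedTags) {
                for (String pageTag : tagsOnPage) {
                    if (tag.trim().equalsIgnoreCase(pageTag.trim())) { // assumed comparison
                        return tag;
                    }
                }
            }
            return null;
        }

        public static void main(String[] args) {
            String[] blacklist = {"tag1", "tag2"}; // hypothetical config values
            System.out.println(findBlacklistedTag(blacklist, Arrays.asList("tag2", "other")));
        }
    }
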
@@ -35,7 +35,7 @@ public class PhotobucketRipper extends AlbumRipper {
 }
 
 public URL sanitizeURL(URL url) throws MalformedURLException {
-logger.info(url);
+LOGGER.info(url);
 String u = url.toExternalForm();
 if (u.contains("?")) {
 u = u.substring(0, u.indexOf("?"));
@@ -100,12 +100,12 @@ public class PhotobucketRipper extends AlbumRipper {
 }
 String nextSub = subsToRip.remove(0);
 rippedSubs.add(nextSub);
-logger.info("Attempting to rip next subalbum: " + nextSub);
+LOGGER.info("Attempting to rip next subalbum: " + nextSub);
 try {
 pageResponse = null;
 subalbums = ripAlbumAndGetSubalbums(nextSub);
 } catch (IOException e) {
-logger.error("Error while ripping " + nextSub, e);
+LOGGER.error("Error while ripping " + nextSub, e);
 break;
 }
 for (String subalbum : subalbums) {
@@ -131,7 +131,7 @@ public class PhotobucketRipper extends AlbumRipper {
 pageIndex++;
 if (pageIndex > 1 || pageResponse == null) {
 url = theUrl + String.format("?sort=3&page=%d", pageIndex);
-logger.info(" Retrieving " + url);
+LOGGER.info(" Retrieving " + url);
 pageResponse = Http.url(url).response();
 }
 Document albumDoc = pageResponse.parse();
@@ -153,7 +153,7 @@ public class PhotobucketRipper extends AlbumRipper {
 }
 }
 if (jsonString == null) {
-logger.error("Unable to find JSON data at URL: " + url);
+LOGGER.error("Unable to find JSON data at URL: " + url);
 break;
 }
 JSONObject json = new JSONObject(jsonString);
@@ -189,7 +189,7 @@ public class PhotobucketRipper extends AlbumRipper {
 + "&albumPath=" + currentAlbumPath // %2Falbums%2Fab10%2FSpazzySpizzy"
 + "&json=1";
 try {
-logger.info("Loading " + apiUrl);
+LOGGER.info("Loading " + apiUrl);
 JSONObject json = Http.url(apiUrl).getJSON();
 JSONArray subalbums = json.getJSONObject("body").getJSONArray("subAlbums");
 for (int i = 0; i < subalbums.length(); i++) {
@@ -202,7 +202,7 @@ public class PhotobucketRipper extends AlbumRipper {
 result.add(suburl);
 }
 } catch (IOException e) {
-logger.error("Failed to get subalbums from " + apiUrl, e);
+LOGGER.error("Failed to get subalbums from " + apiUrl, e);
 }
 return result;
 }
@@ -46,7 +46,7 @@ public class PornhubRipper extends AlbumRipper {
 try {
 // Attempt to use album title as GID
 if (albumDoc == null) {
-logger.info(" Retrieving " + url.toExternalForm());
+LOGGER.info(" Retrieving " + url.toExternalForm());
 sendUpdate(STATUS.LOADING_RESOURCE, url.toString());
 albumDoc = Http.url(url).get();
 }
@@ -54,7 +54,7 @@ public class PornhubRipper extends AlbumRipper {
 return HOST + "_" + elems.get(0).text();
 } catch (Exception e) {
 // Fall back to default album naming convention
-logger.warn("Failed to get album title from " + url, e);
+LOGGER.warn("Failed to get album title from " + url, e);
 }
 return super.getAlbumTitle(url);
 }
@@ -82,7 +82,7 @@ public class PornhubRipper extends AlbumRipper {
 String nextUrl = this.url.toExternalForm();
 
 if (albumDoc == null) {
-logger.info(" Retrieving album page " + nextUrl);
+LOGGER.info(" Retrieving album page " + nextUrl);
 sendUpdate(STATUS.LOADING_RESOURCE, nextUrl);
 albumDoc = Http.url(nextUrl)
 .referrer(this.url)
@@ -92,8 +92,8 @@ public class PornhubRipper extends AlbumRipper {
 // Find thumbnails
 Elements thumbs = albumDoc.select(".photoBlockBox li");
 if (thumbs.isEmpty()) {
-logger.debug("albumDoc: " + albumDoc);
-logger.debug("No images found at " + nextUrl);
+LOGGER.debug("albumDoc: " + albumDoc);
+LOGGER.debug("No images found at " + nextUrl);
 return;
 }
 
@@ -113,7 +113,7 @@ public class PornhubRipper extends AlbumRipper {
 try {
 Thread.sleep(IMAGE_SLEEP_TIME);
 } catch (InterruptedException e) {
-logger.warn("Interrupted while waiting to load next image", e);
+LOGGER.warn("Interrupted while waiting to load next image", e);
 }
 }
 
@@ -155,7 +155,7 @@ public class PornhubRipper extends AlbumRipper {
 Elements images = doc.select("#photoImageSection img");
 Element image = images.first();
 String imgsrc = image.attr("src");
-logger.info("Found URL " + imgsrc + " via " + images.get(0));
+LOGGER.info("Found URL " + imgsrc + " via " + images.get(0));
 
 // Provide prefix and let the AbstractRipper "guess" the filename
 String prefix = "";
@@ -167,7 +167,7 @@ public class PornhubRipper extends AlbumRipper {
 addURLToDownload(imgurl, prefix);
 
 } catch (IOException e) {
-logger.error("[!] Exception while loading/parsing " + this.url, e);
+LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
 }
 }
 }
@@ -4,13 +4,10 @@ import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import com.rarchives.ripme.ripper.AbstractRipper;
-import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
 import org.json.JSONArray;
 import org.json.JSONObject;
 import org.json.JSONTokener;
@@ -20,9 +17,6 @@ import com.rarchives.ripme.ui.UpdateUtils;
 import com.rarchives.ripme.utils.Http;
 import com.rarchives.ripme.utils.RipUtils;
 import com.rarchives.ripme.utils.Utils;
-import org.jsoup.Jsoup;
-import org.jsoup.nodes.Document;
-import org.jsoup.nodes.Element;
 
 public class RedditRipper extends AlbumRipper {
 
@@ -110,7 +104,7 @@ public class RedditRipper extends AlbumRipper {
 try {
 Thread.sleep(2000);
 } catch (InterruptedException e) {
-logger.warn("Interrupted while sleeping", e);
+LOGGER.warn("Interrupted while sleeping", e);
 }
 return nextURL;
 }
@@ -122,7 +116,7 @@ public class RedditRipper extends AlbumRipper {
 try {
 Thread.sleep(timeDiff);
 } catch (InterruptedException e) {
-logger.warn("[!] Interrupted while waiting to load next page", e);
+LOGGER.warn("[!] Interrupted while waiting to load next page", e);
 return new JSONArray();
 }
 }
@@ -141,7 +135,7 @@ public class RedditRipper extends AlbumRipper {
 } else if (jsonObj instanceof JSONArray) {
 jsonArray = (JSONArray) jsonObj;
 } else {
-logger.warn("[!] Unable to parse JSON: " + jsonString);
+LOGGER.warn("[!] Unable to parse JSON: " + jsonString);
 }
 return jsonArray;
 }
@@ -89,10 +89,10 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
 String siteURL = "https://" + subDomain + "sankakucomplex.com";
 // Get the page the full sized image is on
 Document subPage = Http.url(siteURL + postLink).get();
-logger.info("Checking page " + siteURL + postLink);
+LOGGER.info("Checking page " + siteURL + postLink);
 imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href"));
 } catch (IOException e) {
-logger.warn("Error while loading page " + postLink, e);
+LOGGER.warn("Error while loading page " + postLink, e);
 }
 }
 return imageURLs;
@@ -112,7 +112,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
 // Only logged in users can see past page 25
 // Trying to rip page 26 will throw a no images found error
 if (!nextPage.contains("page=26")) {
-logger.info("Getting next page: " + pagination.attr("abs:next-page-url"));
+LOGGER.info("Getting next page: " + pagination.attr("abs:next-page-url"));
 return Http.url(pagination.attr("abs:next-page-url")).cookies(cookies).get();
 }
 }
@@ -50,7 +50,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
 @Override
 public Document getNextPage(Document doc) throws IOException {
 Element elem = doc.select("td.style5 > a > img").last();
-logger.info(elem.parent().attr("href"));
+LOGGER.info(elem.parent().attr("href"));
 if (elem == null || elem.parent().attr("href").equals("view.php?date=")) {
 throw new IOException("No more pages");
 }
@@ -65,9 +65,9 @@ public class StaRipper extends AbstractHTMLRipper {
 cookies.putAll(resp.cookies());
 thumbPage = resp.parse();
 } catch (MalformedURLException e) {
-logger.info(thumbPageURL + " is a malformed URL");
+LOGGER.info(thumbPageURL + " is a malformed URL");
 } catch (IOException e) {
-logger.info(e.getMessage());
+LOGGER.info(e.getMessage());
 }
 String imageDownloadUrl = thumbPage.select("a.dev-page-download").attr("href");
 if (imageDownloadUrl != null && !imageDownloadUrl.equals("")) {
@@ -97,10 +97,10 @@ public class StaRipper extends AbstractHTMLRipper {
 .followRedirects(false)
 .execute();
 String imageURL = response.header("Location");
-logger.info(imageURL);
+LOGGER.info(imageURL);
 return imageURL;
 } catch (IOException e) {
-logger.info("Got error message " + e.getMessage() + " trying to download " + url);
+LOGGER.info("Got error message " + e.getMessage() + " trying to download " + url);
 return null;
 }
 }
@@ -56,7 +56,7 @@ public class TapasticRipper extends AbstractHTMLRipper {
 List<String> urls = new ArrayList<>();
 String html = page.data();
 if (!html.contains("episodeList : ")) {
-logger.error("No 'episodeList' found at " + this.url);
+LOGGER.error("No 'episodeList' found at " + this.url);
 return urls;
 }
 String jsonString = Utils.between(html, "episodeList : ", ",\n").get(0);
@@ -93,7 +93,7 @@ public class TapasticRipper extends AbstractHTMLRipper {
 }
 }
 } catch (IOException e) {
-logger.error("[!] Exception while downloading " + url, e);
+LOGGER.error("[!] Exception while downloading " + url, e);
 }
 
 }
@@ -69,7 +69,7 @@ public class TeenplanetRipper extends AlbumRipper {
 @Override
 public void rip() throws IOException {
 int index = 0;
-logger.info("Retrieving " + this.url);
+LOGGER.info("Retrieving " + this.url);
 sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
 if (albumDoc == null) {
 albumDoc = Http.url(url).get();
@@ -40,7 +40,7 @@ public class TsuminoRipper extends AbstractHTMLRipper {
 JSONObject json = new JSONObject(jsonInfo);
 return json.getJSONArray("reader_page_urls");
 } catch (IOException e) {
-logger.info(e);
+LOGGER.info(e);
 sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED, "Unable to download album, please compete the captcha at http://www.tsumino.com/Read/Auth/"
 + getAlbumID() + " and try again");
 return null;
@@ -51,11 +51,11 @@ public class TumblrRipper extends AlbumRipper {
 }
 
 if (useDefaultApiKey || Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX").equals("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX")) {
-logger.info("Using api key: " + API_KEY);
+LOGGER.info("Using api key: " + API_KEY);
 return API_KEY;
 } else {
 String userDefinedAPIKey = Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX");
-logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
+LOGGER.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
 return userDefinedAPIKey;
 }
 
@@ -66,7 +66,7 @@ public class TumblrRipper extends AlbumRipper {
 "FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4",
 "qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz");
 int genNum = new Random().nextInt(APIKEYS.size());
-logger.info(genNum);
+LOGGER.info(genNum);
 final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS
 return API_KEY;
 }
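
The API_KEY above is drawn at random from a hard-coded list so that load spreads across keys. A minimal self-contained sketch of that selection; the Arrays.asList construction is an assumption, since only two of the keys and the tail of the list are visible in this hunk:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Random;

    public class ApiKeySketch {
        private static final List<String> APIKEYS = Arrays.asList( // construction assumed
                "FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4",
                "qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz");

        static String pickKey() {
            // "Select random API key from APIKEYS", as the diff's own comment puts it.
            int genNum = new Random().nextInt(APIKEYS.size());
            return APIKEYS.get(genNum);
        }
    }
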
@@ -96,10 +96,10 @@ public class TumblrRipper extends AlbumRipper {
 if (StringUtils.countMatches(u, ".") > 2) {
 url = new URL(u.replace(".tumblr.com", ""));
 if (isTumblrURL(url)) {
-logger.info("Detected tumblr site: " + url);
+LOGGER.info("Detected tumblr site: " + url);
 }
 else {
-logger.info("Not a tumblr site: " + url);
+LOGGER.info("Not a tumblr site: " + url);
 }
 }
 return url;
@@ -115,7 +115,7 @@ public class TumblrRipper extends AlbumRipper {
 int status = json.getJSONObject("meta").getInt("status");
 return status == 200;
 } catch (IOException e) {
-logger.error("Error while checking possible tumblr domain: " + url.getHost(), e);
+LOGGER.error("Error while checking possible tumblr domain: " + url.getHost(), e);
 }
 return false;
 }
@@ -150,7 +150,7 @@ public class TumblrRipper extends AlbumRipper {
 
 
 String apiURL = getTumblrApiURL(mediaType, offset);
-logger.info("Retrieving " + apiURL);
+LOGGER.info("Retrieving " + apiURL);
 sendUpdate(STATUS.LOADING_RESOURCE, apiURL);
 
 JSONObject json = null;
@@ -165,7 +165,7 @@ public class TumblrRipper extends AlbumRipper {
 if (status.getStatusCode() == HttpURLConnection.HTTP_UNAUTHORIZED && !useDefaultApiKey) {
 retry = true;
 } else if (status.getStatusCode() == 429) {
-logger.error("Tumblr rate limit has been exceeded");
+LOGGER.error("Tumblr rate limit has been exceeded");
 sendUpdate(STATUS.DOWNLOAD_ERRORED,"Tumblr rate limit has been exceeded");
 exceededRateLimit = true;
 break;
@@ -178,7 +178,7 @@ public class TumblrRipper extends AlbumRipper {
 String apiKey = getApiKey();
 
 String message = "401 Unauthorized. Will retry with default Tumblr API key: " + apiKey;
-logger.info(message);
+LOGGER.info(message);
 sendUpdate(STATUS.DOWNLOAD_WARN, message);
 
 Utils.setConfigString(TUMBLR_AUTH_CONFIG_KEY, apiKey); // save the default key to the config
@@ -186,7 +186,7 @@ public class TumblrRipper extends AlbumRipper {
 // retry loading the JSON
 
 apiURL = getTumblrApiURL(mediaType, offset);
-logger.info("Retrieving " + apiURL);
+LOGGER.info("Retrieving " + apiURL);
 sendUpdate(STATUS.LOADING_RESOURCE, apiURL);
 
 json = Http.url(apiURL).getJSON();
@@ -195,7 +195,7 @@ public class TumblrRipper extends AlbumRipper {
 try {
 Thread.sleep(1000);
 } catch (InterruptedException e) {
-logger.error("[!] Interrupted while waiting to load next album:", e);
+LOGGER.error("[!] Interrupted while waiting to load next album:", e);
 break;
 }
 
@@ -224,7 +224,7 @@ public class TumblrRipper extends AlbumRipper {
 
 posts = json.getJSONObject("response").getJSONArray("posts");
 if (posts.length() == 0) {
-logger.info(" Zero posts returned.");
+LOGGER.info(" Zero posts returned.");
 return false;
 }
 
@@ -251,7 +251,7 @@ public class TumblrRipper extends AlbumRipper {
 addURLToDownload(redirectedURL);
 }
 } catch (Exception e) {
-logger.error("[!] Error while parsing photo in " + photo, e);
+LOGGER.error("[!] Error while parsing photo in " + photo, e);
 }
 }
 } else if (post.has("video_url")) {
@@ -259,7 +259,7 @@ public class TumblrRipper extends AlbumRipper {
 fileURL = new URL(post.getString("video_url").replaceAll("http:", "https:"));
 addURLToDownload(fileURL);
 } catch (Exception e) {
-logger.error("[!] Error while parsing video in " + post, e);
+LOGGER.error("[!] Error while parsing video in " + post, e);
 return true;
 }
 }
@@ -103,13 +103,13 @@ public class TwitterRipper extends AlbumRipper {
 .getJSONObject(resource)
 .getJSONObject(api);
 int remaining = stats.getInt("remaining");
-logger.info(" Twitter " + resource + " calls remaining: " + remaining);
+LOGGER.info(" Twitter " + resource + " calls remaining: " + remaining);
 if (remaining < 20) {
-logger.error("Twitter API calls exhausted: " + stats.toString());
+LOGGER.error("Twitter API calls exhausted: " + stats.toString());
 throw new IOException("Less than 20 API calls remaining; not enough to rip.");
 }
 } catch (JSONException e) {
-logger.error("JSONException: ", e);
+LOGGER.error("JSONException: ", e);
 throw new IOException("Error while parsing JSON: " + body, e);
 }
 }
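
The TwitterRipper hunk above guards each run by reading the rate-limit status before ripping and aborting when fewer than 20 calls remain. A minimal sketch of that guard, operating on the already-extracted stats object (the surrounding resource/api lookup path is abbreviated in the hunk, so it is taken as given here):

    import java.io.IOException;
    import org.json.JSONObject;

    public class RateLimitSketch {
        // Throws when the remaining call budget is too small to finish a rip.
        static void checkRateLimit(JSONObject stats, String resource) throws IOException {
            int remaining = stats.getInt("remaining");
            System.out.println("Twitter " + resource + " calls remaining: " + remaining);
            if (remaining < 20) {
                throw new IOException("Less than 20 API calls remaining; not enough to rip.");
            }
        }
    }
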
@@ -142,7 +142,7 @@ public class TwitterRipper extends AlbumRipper {
 
 private List<JSONObject> getTweets(String url) throws IOException {
 List<JSONObject> tweets = new ArrayList<>();
-logger.info(" Retrieving " + url);
+LOGGER.info(" Retrieving " + url);
 Document doc = Http.url(url)
 .ignoreContentType()
 .header("Authorization", "Bearer " + accessToken)
@@ -171,7 +171,7 @@ public class TwitterRipper extends AlbumRipper {
 private int parseTweet(JSONObject tweet) throws MalformedURLException {
 int parsedCount = 0;
 if (!tweet.has("extended_entities")) {
-logger.error("XXX Tweet doesn't have entitites");
+LOGGER.error("XXX Tweet doesn't have entitites");
 return 0;
 }
 
@@ -201,7 +201,7 @@ public class TwitterRipper extends AlbumRipper {
 addURLToDownload(new URL(url));
 parsedCount++;
 } else {
-logger.debug("Unexpected media_url: " + url);
+LOGGER.debug("Unexpected media_url: " + url);
 }
 }
 }
@@ -229,14 +229,14 @@ public class TwitterRipper extends AlbumRipper {
 for (int i = 0; i < MAX_REQUESTS; i++) {
 List<JSONObject> tweets = getTweets(getApiURL(lastMaxID - 1));
 if (tweets.isEmpty()) {
-logger.info(" No more tweets found.");
+LOGGER.info(" No more tweets found.");
 break;
 }
-logger.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets);
+LOGGER.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets);
 if (tweets.size() == 1 &&
 lastMaxID.equals(tweets.get(0).getString("id_str"))
 ) {
-logger.info(" No more tweet found.");
+LOGGER.info(" No more tweet found.");
 break;
 }
 
@@ -256,7 +256,7 @@ public class TwitterRipper extends AlbumRipper {
 try {
 Thread.sleep(WAIT_TIME);
 } catch (InterruptedException e) {
-logger.error("[!] Interrupted while waiting to load more results", e);
+LOGGER.error("[!] Interrupted while waiting to load more results", e);
 break;
 }
 }
@@ -66,7 +66,7 @@ public class TwodgalleriesRipper extends AbstractHTMLRipper {
 try {
 login();
 } catch (IOException e) {
-logger.error("Failed to login", e);
+LOGGER.error("Failed to login", e);
 }
 String url = getURL(getGID(this.url), offset);
 return Http.url(url)
@@ -43,7 +43,7 @@ public class ViewcomicRipper extends AbstractHTMLRipper {
 return getHost() + "_" + title.trim();
 } catch (IOException e) {
 // Fall back to default album naming convention
-logger.info("Unable to find title at " + url);
+LOGGER.info("Unable to find title at " + url);
 }
 return super.getAlbumTitle(url);
 }
@@ -71,7 +71,7 @@ public class VkRipper extends AlbumRipper {
 String[] jsonStrings = doc.toString().split("<!>");
 JSONObject json = new JSONObject(jsonStrings[jsonStrings.length - 1]);
 JSONArray videos = json.getJSONArray("all");
-logger.info("Found " + videos.length() + " videos");
+LOGGER.info("Found " + videos.length() + " videos");
 for (int i = 0; i < videos.length(); i++) {
 JSONArray jsonVideo = videos.getJSONArray(i);
 int vidid = jsonVideo.getInt(1);
@@ -85,7 +85,7 @@ public class VkRipper extends AlbumRipper {
 try {
 Thread.sleep(500);
 } catch (InterruptedException e) {
-logger.error("Interrupted while waiting to fetch next video URL", e);
+LOGGER.error("Interrupted while waiting to fetch next video URL", e);
 break;
 }
 }
@@ -96,7 +96,7 @@ public class VkRipper extends AlbumRipper {
 Map<String,String> photoIDsToURLs = new HashMap<>();
 int offset = 0;
 while (true) {
-logger.info(" Retrieving " + this.url);
+LOGGER.info(" Retrieving " + this.url);
 
 // al=1&offset=80&part=1
 Map<String,String> postData = new HashMap<>();
@@ -119,7 +119,7 @@ public class VkRipper extends AlbumRipper {
 Set<String> photoIDsToGet = new HashSet<>();
 for (Element a : elements) {
 if (!a.attr("onclick").contains("showPhoto('")) {
-logger.error("a: " + a);
+LOGGER.error("a: " + a);
 continue;
 }
 String photoID = a.attr("onclick");
@@ -134,12 +134,12 @@ public class VkRipper extends AlbumRipper {
 try {
 photoIDsToURLs.putAll(getPhotoIDsToURLs(photoID));
 } catch (IOException e) {
-logger.error("Exception while retrieving photo id " + photoID, e);
+LOGGER.error("Exception while retrieving photo id " + photoID, e);
 continue;
 }
 }
 if (!photoIDsToURLs.containsKey(photoID)) {
-logger.error("Could not find URL for photo ID: " + photoID);
+LOGGER.error("Could not find URL for photo ID: " + photoID);
 continue;
 }
 String url = photoIDsToURLs.get(photoID);
@@ -6,10 +6,11 @@ import com.rarchives.ripme.utils.Http;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.*;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.json.JSONObject;
 import org.jsoup.Jsoup;
+
 import org.jsoup.nodes.Document;
@@ -19,7 +20,11 @@ import org.jsoup.select.Elements;
 /**
 * For ripping VSCO pictures.
 */
-public class VscoRipper extends AbstractHTMLRipper{
+public class VscoRipper extends AbstractHTMLRipper {
 
+int pageNumber = 1;
+JSONObject profileJSON;
+
+
 private static final String DOMAIN = "vsco.co",
 HOST = "vsco";
@@ -73,33 +78,23 @@ public class VscoRipper extends AbstractHTMLRipper{
 try {
 toRip.add(vscoImageToURL(url.toExternalForm()));
 } catch (IOException ex) {
-logger.debug("Failed to convert " + url.toString() + " to external form.");
+LOGGER.debug("Failed to convert " + url.toString() + " to external form.");
 }
 
-} else {//want to rip a member profile
-/*
-String baseURL = "https://vsco.co";
-//Find all the relative links, adds Base URL, then adds them to an ArrayList
-List<URL> relativeLinks = new ArrayList<>();
-Elements links = page.getElementsByTag("a");
+} else {
+String username = getUserName();
+String userTkn = getUserTkn(username);
+String siteID = getSiteID(userTkn, username);
+while (true) {
+profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID);
+for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) {
+toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url"));
 
 
-for(Element link : links){
-System.out.println(link.toString());
-//if link includes "/media/", add it to the list
-if (link.attr("href").contains("/media")) {
-try {
-String relativeURL = vscoImageToURL(link.attr("href"));
-toRip.add(baseURL + relativeURL);
-} catch (IOException ex) {
-logger.debug("Could not add \"" + link.toString() + "\" to list for ripping.");
 }
+if (pageNumber * 1000 > profileJSON.getInt("total")) {
+return toRip;
 }
+pageNumber++;
 }
-*/
-logger.debug("Sorry, RipMe currently only supports ripping single images.");
 
 
 }
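
The rewritten else-branch pages through the profile's media API 1000 items at a time and stops once pageNumber * 1000 exceeds the total reported by the API. A compact sketch of just that loop-termination arithmetic, with the network fetch stubbed out (the total value here is hypothetical):

    public class PaginationSketch {
        // Mirrors the diff's stop condition: the page size is fixed at 1000,
        // so after processing page N, N * 1000 items have been covered.
        public static void main(String[] args) {
            int total = 2345;  // hypothetical "total" field from the profile JSON
            int pageNumber = 1;
            while (true) {
                System.out.println("fetched page " + pageNumber);
                if (pageNumber * 1000 > total) {
                    break;     // pages 1..3 are fetched for a 2345-item profile
                }
                pageNumber++;
            }
        }
    }
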
@@ -107,6 +102,59 @@ public class VscoRipper extends AbstractHTMLRipper{
 return toRip;
 }
 
+private String getUserTkn(String username) {
+String userinfoPage = "https://vsco.co/content/Static/userinfo";
+String referer = "https://vsco.co/" + username + "/images/1";
+Map<String,String> cookies = new HashMap<>();
+cookies.put("vs_anonymous_id", UUID.randomUUID().toString());
+try {
+Element doc = Http.url(userinfoPage).cookies(cookies).referrer(referer).ignoreContentType().get().body();
+String json = doc.text().replaceAll("define\\(", "");
+json = json.replaceAll("\\)", "");
+return new JSONObject(json).getString("tkn");
+} catch (IOException e) {
+LOGGER.error("Could not get user tkn");
+return null;
+}
+}
+
+private String getUserName() {
+Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9]+)/images/[0-9]+");
+Matcher m = p.matcher(url.toExternalForm());
+
+if (m.matches()) {
+String user = m.group(1);
+return user;
+}
+return null;
+}
+
+private JSONObject getProfileJSON(String tkn, String username, String page, String siteId) {
+String size = "1000";
+String purl = "https://vsco.co/ajxp/" + tkn + "/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size;
+Map<String,String> cookies = new HashMap<>();
+cookies.put("vs", tkn);
+try {
+JSONObject j = Http.url(purl).cookies(cookies).getJSON();
+return j;
+} catch (IOException e) {
+LOGGER.error("Could not profile images");
+return null;
+}
+}
+
+private String getSiteID(String tkn, String username) {
+Map<String,String> cookies = new HashMap<>();
+cookies.put("vs", tkn);
+try {
+JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON();
+return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id"));
+} catch (IOException e) {
+LOGGER.error("Could not get site id");
+return null;
+}
+}
+
 private String vscoImageToURL(String url) throws IOException{
 Document page = Jsoup.connect(url).userAgent(USER_AGENT)
 .get();
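
The new getUserTkn pulls the anonymous token out of a JSONP-style response by stripping the define( ... ) wrapper before parsing. A tiny standalone sketch of that unwrapping step, using the same two replaceAll calls as the diff (the sample payload is invented for illustration):

    import org.json.JSONObject;

    public class TknSketch {
        public static void main(String[] args) {
            // Hypothetical body of https://vsco.co/content/Static/userinfo
            String body = "define({\"tkn\":\"abc123\"})";
            // Drop "define(" and every ")" so only the JSON object remains.
            String json = body.replaceAll("define\\(", "").replaceAll("\\)", "");
            System.out.println(new JSONObject(json).getString("tkn")); // abc123
        }
    }
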
@@ -121,14 +169,14 @@ public class VscoRipper extends AbstractHTMLRipper{
 givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number)
 
 result = givenURL;
-logger.debug("Found image URL: " + givenURL);
+LOGGER.debug("Found image URL: " + givenURL);
 break;//immediatly stop after getting URL (there should only be 1 image to be downloaded)
 }
 }
 
 //Means website changed, things need to be fixed.
 if (result.isEmpty()){
-logger.error("Could not find image URL at: " + url);
+LOGGER.error("Could not find image URL at: " + url);
 }
 
 return result;
@@ -177,11 +225,6 @@ public class VscoRipper extends AbstractHTMLRipper{
 return Http.url(url).get();
 }
 
-@Override
-public Document getNextPage(Document doc) throws IOException {
-return super.getNextPage(doc);
-}
-
 @Override
 public void downloadURL(URL url, int index) {
 addURLToDownload(url, getPrefix(index));
@@ -36,7 +36,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {
 URLToReturn = URLToReturn.replaceAll("m.xhamster.com", "xhamster.com");
 URLToReturn = URLToReturn.replaceAll("\\w\\w.xhamster.com", "xhamster.com");
 URL san_url = new URL(URLToReturn.replaceAll("xhamster.com", "m.xhamster.com"));
-logger.info("sanitized URL is " + san_url.toExternalForm());
+LOGGER.info("sanitized URL is " + san_url.toExternalForm());
 return san_url;
 }
 
@@ -55,11 +55,11 @@ public class ZizkiRipper extends AbstractHTMLRipper {
 
 Element authorSpan = getFirstPage().select("span[class=creator]").first();
 String author = authorSpan.select("a").first().text();
-logger.debug("Author: " + author);
+LOGGER.debug("Author: " + author);
 return getHost() + "_" + author + "_" + title.trim();
 } catch (IOException e) {
 // Fall back to default album naming convention
-logger.info("Unable to find title at " + url);
+LOGGER.info("Unable to find title at " + url);
 }
 return super.getAlbumTitle(url);
 }
@@ -78,9 +78,9 @@ public class ZizkiRipper extends AbstractHTMLRipper {
 public List<String> getURLsFromPage(Document page) {
 List<String> imageURLs = new ArrayList<>();
 // Page contains images
-logger.info("Look for images.");
+LOGGER.info("Look for images.");
 for (Element thumb : page.select("img")) {
-logger.info("Img");
+LOGGER.info("Img");
 if (super.isStopped()) break;
 // Find thumbnail image source
 String image = null;
@@ -89,7 +89,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
 if (thumb.hasAttr("typeof")) {
 img_type = thumb.attr("typeof");
 if (img_type.equals("foaf:Image")) {
-logger.debug("Found image with " + img_type);
+LOGGER.debug("Found image with " + img_type);
 if (thumb.parent() != null &&
 thumb.parent().parent() != null &&
 thumb.parent().parent().attr("class") != null &&
@@ -97,7 +97,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
 )
 {
 src = thumb.attr("src");
-logger.debug("Found url with " + src);
+LOGGER.debug("Found url with " + src);
 if (!src.contains("zizki.com")) {
 } else {
 imageURLs.add(src.replace("/styles/medium/public/","/styles/large/public/"));
@@ -55,7 +55,7 @@ public class CliphunterRipper extends VideoRipper {
 
 @Override
 public void rip() throws IOException {
-logger.info("Retrieving " + this.url);
+LOGGER.info("Retrieving " + this.url);
 String html = Http.url(url).get().html();
 String jsonString = html.substring(html.indexOf("var flashVars = {d: '") + 21);
 jsonString = jsonString.substring(0, jsonString.indexOf("'"));
@@ -65,7 +65,7 @@ public class GfycatRipper extends VideoRipper {
 * @throws IOException
 */
 public static String getVideoURL(URL url) throws IOException {
-logger.info("Retrieving " + url.toExternalForm());
+LOGGER.info("Retrieving " + url.toExternalForm());
 
 //Sanitize the URL first
 url = new URL(url.toExternalForm().replace("/gifs/detail", ""));
@@ -52,10 +52,10 @@ public class MotherlessVideoRipper extends VideoRipper {
 
 @Override
 public void rip() throws IOException {
-logger.info(" Retrieving " + this.url);
+LOGGER.info(" Retrieving " + this.url);
 String html = Http.url(this.url).get().toString();
 if (html.contains("__fileurl = '")) {
-logger.error("WTF");
+LOGGER.error("WTF");
 }
 List<String> vidUrls = Utils.between(html, "__fileurl = '", "';");
 if (vidUrls.isEmpty()) {
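
MotherlessVideoRipper pulls the video URL out of inline JavaScript with RipMe's Utils.between(html, start, end) helper, which returns every substring found between the two markers. A rough standalone equivalent of that scan; this sketch is an approximation, not the project's actual implementation:

    import java.util.ArrayList;
    import java.util.List;

    public class BetweenSketch {
        // Collects every substring that sits between `start` and `end` markers.
        static List<String> between(String text, String start, String end) {
            List<String> found = new ArrayList<>();
            int i = text.indexOf(start);
            while (i >= 0) {
                int from = i + start.length();
                int to = text.indexOf(end, from);
                if (to < 0) {
                    break;
                }
                found.add(text.substring(from, to));
                i = text.indexOf(start, to + end.length());
            }
            return found;
        }

        public static void main(String[] args) {
            String html = "var x; __fileurl = 'https://example.com/v.mp4'; more";
            System.out.println(between(html, "__fileurl = '", "';")); // [https://example.com/v.mp4]
        }
    }
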
|
@ -54,7 +54,7 @@ public class PornhubRipper extends VideoRipper {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void rip() throws IOException {
|
public void rip() throws IOException {
|
||||||
logger.info(" Retrieving " + this.url.toExternalForm());
|
LOGGER.info(" Retrieving " + this.url.toExternalForm());
|
||||||
Document doc = Http.url(this.url).get();
|
Document doc = Http.url(this.url).get();
|
||||||
String html = doc.body().html();
|
String html = doc.body().html();
|
||||||
Pattern p = Pattern.compile("^.*flashvars_[0-9]+ = (.+});.*$", Pattern.DOTALL);
|
Pattern p = Pattern.compile("^.*flashvars_[0-9]+ = (.+});.*$", Pattern.DOTALL);
|
||||||
@ -81,10 +81,10 @@ public class PornhubRipper extends VideoRipper {
|
|||||||
}
|
}
|
||||||
addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url));
|
addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url));
|
||||||
} catch (JSONException e) {
|
} catch (JSONException e) {
|
||||||
logger.error("Error while parsing JSON at " + url, e);
|
LOGGER.error("Error while parsing JSON at " + url, e);
|
||||||
throw e;
|
throw e;
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
logger.error("Error while retrieving video URL at " + url, e);
|
LOGGER.error("Error while retrieving video URL at " + url, e);
|
||||||
throw new IOException(e);
|
throw new IOException(e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -55,7 +55,7 @@ public class TwitchVideoRipper extends VideoRipper {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void rip() throws IOException {
|
public void rip() throws IOException {
|
||||||
logger.info("Retrieving " + this.url);
|
LOGGER.info("Retrieving " + this.url);
|
||||||
Document doc = Http.url(url).get();
|
Document doc = Http.url(url).get();
|
||||||
|
|
||||||
//Get user friendly filename from page title
|
//Get user friendly filename from page title
|
||||||
|
@ -53,7 +53,7 @@ public class ViddmeRipper extends VideoRipper {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void rip() throws IOException {
|
public void rip() throws IOException {
|
||||||
logger.info(" Retrieving " + this.url.toExternalForm());
|
LOGGER.info(" Retrieving " + this.url.toExternalForm());
|
||||||
Document doc = Http.url(this.url).get();
|
Document doc = Http.url(this.url).get();
|
||||||
Elements videos = doc.select("meta[name=twitter:player:stream]");
|
Elements videos = doc.select("meta[name=twitter:player:stream]");
|
||||||
if (videos.isEmpty()) {
|
if (videos.isEmpty()) {
|
||||||
@@ -54,7 +54,7 @@ public class VidearnRipper extends VideoRipper {

     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         List<String> mp4s = Utils.between(doc.html(), "file:\"", "\"");
         if (mp4s.isEmpty()) {
@@ -54,7 +54,7 @@ public class VineRipper extends VideoRipper {

     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url.toExternalForm());
+        LOGGER.info(" Retrieving " + this.url.toExternalForm());
         Document doc = Http.url(this.url).get();
         Elements props = doc.select("meta[property=twitter:player:stream]");
         if (props.isEmpty()) {
@@ -52,7 +52,7 @@ public class VkRipper extends VideoRipper {

     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url);
+        LOGGER.info(" Retrieving " + this.url);
         String videoURL = getVideoURLAtPage(this.url.toExternalForm());
         addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url));
         waitForThreads();
@@ -54,7 +54,7 @@ public class XhamsterRipper extends VideoRipper {

     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         Elements videos = doc.select("div.player-container > a");
         if (videos.isEmpty()) {
@@ -54,12 +54,12 @@ public class XvideosRipper extends VideoRipper {

     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url);
+        LOGGER.info(" Retrieving " + this.url);
         Document doc = Http.url(this.url).get();
         Elements scripts = doc.select("script");
         for (Element e : scripts) {
             if (e.html().contains("html5player.setVideoUrlHigh")) {
-                logger.info("Found the right script");
+                LOGGER.info("Found the right script");
                 String[] lines = e.html().split("\n");
                 for (String line: lines) {
                     if (line.contains("html5player.setVideoUrlHigh")) {
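The XvideosRipper hunk shows this file's general scraping pattern: select every script element with Jsoup, find the one that calls html5player.setVideoUrlHigh, then cut the URL out of that line. A runnable sketch of the same idea against an inline HTML snippet (the snippet and the quote-based extraction are illustrative assumptions; the real page markup differs):

    import org.jsoup.Jsoup;
    import org.jsoup.nodes.Document;
    import org.jsoup.nodes.Element;

    public class ScriptScrapeDemo {
        public static void main(String[] args) {
            // Illustrative stand-in for a fetched page; real markup is far more complex
            String html = "<html><body><script>html5player.setVideoUrlHigh('https://example.com/v.mp4');</script></body></html>";
            Document doc = Jsoup.parse(html);
            for (Element e : doc.select("script")) {
                if (e.html().contains("html5player.setVideoUrlHigh")) {
                    for (String line : e.html().split("\n")) {
                        if (line.contains("html5player.setVideoUrlHigh")) {
                            // Cut out the quoted argument, in the same spirit as Utils.between
                            int start = line.indexOf('\'') + 1;
                            int end = line.indexOf('\'', start);
                            System.out.println(line.substring(start, end)); // https://example.com/v.mp4
                        }
                    }
                }
            }
        }
    }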
@@ -54,7 +54,7 @@ public class YoupornRipper extends VideoRipper {

     @Override
     public void rip() throws IOException {
-        logger.info(" Retrieving " + this.url);
+        LOGGER.info(" Retrieving " + this.url);
         Document doc = Http.url(this.url).get();
         Elements videos = doc.select("video");
         if (videos.isEmpty()) {
@@ -55,7 +55,7 @@ public class YuvutuRipper extends VideoRipper {

     @Override
     public void rip() throws IOException {
-        logger.info("Retrieving " + this.url);
+        LOGGER.info("Retrieving " + this.url);
         Document doc = Http.url(url).get();
         Element iframe = doc.select("iframe").first();
         String iframeSrc = iframe.attr("src");
@@ -66,7 +66,7 @@ import javax.swing.UnsupportedLookAndFeelException;
  */
 public final class MainWindow implements Runnable, RipStatusHandler {

-    private static final Logger logger = Logger.getLogger(MainWindow.class);
+    private static final Logger LOGGER = Logger.getLogger(MainWindow.class);

     private boolean isRipping = false; // Flag to indicate if we're ripping something

@@ -279,7 +279,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         try {
             UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
         } catch (ClassNotFoundException | InstantiationException | UnsupportedLookAndFeelException | IllegalAccessException e) {
-            logger.error("[!] Exception setting system theme:", e);
+            LOGGER.error("[!] Exception setting system theme:", e);
         }

         ripTextfield = new JTextField("", 20);
@@ -821,7 +821,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             try {
                 chosenPath = chosenFile.getCanonicalPath();
             } catch (Exception e) {
-                logger.error("Error while getting selected path: ", e);
+                LOGGER.error("Error while getting selected path: ", e);
                 return;
             }
             configSaveDirLabel.setText(Utils.shortenPath(chosenPath));
@@ -877,7 +877,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                 break;
             }
             Logger.getRootLogger().setLevel(newLevel);
-            logger.setLevel(newLevel);
+            LOGGER.setLevel(newLevel);
             ConsoleAppender ca = (ConsoleAppender)Logger.getRootLogger().getAppender("stdout");
             if (ca != null) {
                 ca.setThreshold(newLevel);
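The hunk above changes the log level at runtime through the log4j 1.x API, which requires lowering both the logger level and the appender threshold. A minimal standalone sketch of the same pattern; the programmatic appender setup and the "stdout" name are assumptions made so the sketch runs without RipMe's actual log4j configuration:

    import org.apache.log4j.ConsoleAppender;
    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;

    public class LogLevelDemo {
        private static final Logger LOGGER = Logger.getLogger(LogLevelDemo.class);

        public static void main(String[] args) {
            // Programmatic setup so the sketch runs without a log4j.properties file
            ConsoleAppender stdout = new ConsoleAppender(new PatternLayout("%-5p %m%n"));
            stdout.setName("stdout");
            Logger.getRootLogger().addAppender(stdout);

            // Same pattern as the diff: lower the root logger and the appender threshold
            // together, otherwise the appender keeps filtering messages below its old threshold
            Level newLevel = Level.DEBUG;
            Logger.getRootLogger().setLevel(newLevel);
            LOGGER.setLevel(newLevel);
            ConsoleAppender ca = (ConsoleAppender) Logger.getRootLogger().getAppender("stdout");
            if (ca != null) {
                ca.setThreshold(newLevel);
            }

            LOGGER.debug("visible only after both the level and the threshold are lowered");
        }
    }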
@@ -951,7 +951,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                 try {
                     Desktop.getDesktop().browse(URI.create("http://github.com/ripmeapp/ripme"));
                 } catch (IOException e) {
-                    logger.error("Exception while opening project home page", e);
+                    LOGGER.error("Exception while opening project home page", e);
                 }
             }
         });
@@ -1024,10 +1024,10 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         HISTORY.clear();
         if (historyFile.exists()) {
             try {
-                logger.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
+                LOGGER.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
                 HISTORY.fromFile(historyFile.getCanonicalPath());
             } catch (IOException e) {
-                logger.error("Failed to load history from file " + historyFile, e);
+                LOGGER.error("Failed to load history from file " + historyFile, e);
                 JOptionPane.showMessageDialog(null,
                         "RipMe failed to load the history file at " + historyFile.getAbsolutePath() + "\n\n" +
                         "Error: " + e.getMessage() + "\n\n" +
@@ -1037,7 +1037,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                         JOptionPane.ERROR_MESSAGE);
             }
         } else {
-            logger.info(rb.getString("loading.history.from.configuration"));
+            LOGGER.info(rb.getString("loading.history.from.configuration"));
             HISTORY.fromList(Utils.getConfigList("download.history"));
             if (HISTORY.toList().isEmpty()) {
                 // Loaded from config, still no entries.
@@ -1067,7 +1067,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                 HISTORY.toFile(historyFile.toString());
                 Utils.setConfigList("download.history", Collections.emptyList());
             } catch (IOException e) {
-                logger.error("Failed to save history to file " + historyFile, e);
+                LOGGER.error("Failed to save history to file " + historyFile, e);
             }
         }

@@ -1089,7 +1089,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
                 try {
                     Thread.sleep(500);
                 } catch (InterruptedException ie) {
-                    logger.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
+                    LOGGER.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
                 }
                 ripNextAlbum();
             } else {
@@ -1113,7 +1113,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         try {
             url = new URL(urlString);
         } catch (MalformedURLException e) {
-            logger.error("[!] Could not generate URL for '" + urlString + "'", e);
+            LOGGER.error("[!] Could not generate URL for '" + urlString + "'", e);
             error("Given URL is not valid, expecting http://website.com/page/...");
             return null;
         }
@@ -1128,7 +1128,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             ripper.setup();
         } catch (Exception e) {
             failed = true;
-            logger.error("Could not find ripper for URL " + url, e);
+            LOGGER.error("Could not find ripper for URL " + url, e);
             error(e.getMessage());
         }
         if (!failed) {
@@ -1146,7 +1146,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             }
             return t;
         } catch (Exception e) {
-            logger.error("[!] Error while ripping: " + e.getMessage(), e);
+            LOGGER.error("[!] Error while ripping: " + e.getMessage(), e);
             error("Unable to rip this URL: " + e.getMessage());
         }
     }
@@ -1197,28 +1197,28 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         switch(msg.getStatus()) {
         case LOADING_RESOURCE:
         case DOWNLOAD_STARTED:
-            if (logger.isEnabledFor(Level.INFO)) {
+            if (LOGGER.isEnabledFor(Level.INFO)) {
                 appendLog("Downloading " + msg.getObject(), Color.BLACK);
             }
             break;
         case DOWNLOAD_COMPLETE:
-            if (logger.isEnabledFor(Level.INFO)) {
+            if (LOGGER.isEnabledFor(Level.INFO)) {
                 appendLog("Downloaded " + msg.getObject(), Color.GREEN);
             }
             break;
         case DOWNLOAD_ERRORED:
-            if (logger.isEnabledFor(Level.ERROR)) {
+            if (LOGGER.isEnabledFor(Level.ERROR)) {
                 appendLog((String) msg.getObject(), Color.RED);
             }
             break;
         case DOWNLOAD_WARN:
-            if (logger.isEnabledFor(Level.WARN)) {
+            if (LOGGER.isEnabledFor(Level.WARN)) {
                 appendLog((String) msg.getObject(), Color.ORANGE);
             }
             break;

         case RIP_ERRORED:
-            if (logger.isEnabledFor(Level.ERROR)) {
+            if (LOGGER.isEnabledFor(Level.ERROR)) {
                 appendLog((String) msg.getObject(), Color.RED);
             }
             stopButton.setEnabled(false);
@@ -1270,7 +1270,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
             try {
                 Desktop.getDesktop().open(new File(event.getActionCommand()));
             } catch (Exception e) {
-                logger.error(e);
+                LOGGER.error(e);
             }
         });
         pack();
@@ -1341,7 +1341,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         Utils.setConfigInteger("window.y", y);
         Utils.setConfigInteger("window.w", w);
         Utils.setConfigInteger("window.h", h);
-        logger.debug("Saved window position (x=" + x + ", y=" + y + ", w=" + w + ", h=" + h + ")");
+        LOGGER.debug("Saved window position (x=" + x + ", y=" + y + ", w=" + w + ", h=" + h + ")");
     }

     private static void restoreWindowPosition(Frame frame) {
@@ -1356,7 +1356,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
         int w = Utils.getConfigInteger("window.w", -1);
         int h = Utils.getConfigInteger("window.h", -1);
         if (x < 0 || y < 0 || w <= 0 || h <= 0) {
-            logger.debug("UNUSUAL: One or more of: x, y, w, or h was still less than 0 after reading config");
+            LOGGER.debug("UNUSUAL: One or more of: x, y, w, or h was still less than 0 after reading config");
             mainFrame.setLocationRelativeTo(null); // default to middle of screen
             return;
         }
@@ -20,7 +20,7 @@ import com.rarchives.ripme.utils.Utils;
 public class UpdateUtils {

     private static final Logger logger = Logger.getLogger(UpdateUtils.class);
-    private static final String DEFAULT_VERSION = "1.7.50";
+    private static final String DEFAULT_VERSION = "1.7.51";
     private static final String REPO_NAME = "ripmeapp/ripme";
     private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
     private static final String mainFileName = "ripme.jar";
@@ -39,6 +39,20 @@ public class UpdateUtils {
         }
         return thisVersion;
     }

+    private static String getChangeList(JSONObject rj) {
+        JSONArray jsonChangeList = rj.getJSONArray("changeList");
+        StringBuilder changeList = new StringBuilder();
+        for (int i = 0; i < jsonChangeList.length(); i++) {
+            String change = jsonChangeList.getString(i);
+            if (change.startsWith(UpdateUtils.getThisJarVersion() + ":")) {
+                break;
+            }
+            changeList.append("\n").append(change);
+        }
+        return changeList.toString();
+    }
+
     public static void updateProgramCLI() {
         logger.info("Checking for update...");

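The new getChangeList helper walks the changeList array in ripme.json and collects entries until it hits the one for the running version, so only changes newer than the installed build are shown. A quick standalone sketch of that behavior against a hand-built JSON object; the sample entries and the hard-coded current version are illustrative, not real release data:

    import org.json.JSONArray;
    import org.json.JSONObject;

    public class ChangeListDemo {
        // Stand-in for UpdateUtils.getThisJarVersion(); the value is illustrative
        private static String getThisJarVersion() {
            return "1.7.50";
        }

        private static String getChangeList(JSONObject rj) {
            JSONArray jsonChangeList = rj.getJSONArray("changeList");
            StringBuilder changeList = new StringBuilder();
            for (int i = 0; i < jsonChangeList.length(); i++) {
                String change = jsonChangeList.getString(i);
                // Entries are newest-first; stop once we reach the running version
                if (change.startsWith(getThisJarVersion() + ":")) {
                    break;
                }
                changeList.append("\n").append(change);
            }
            return changeList.toString();
        }

        public static void main(String[] args) {
            JSONObject rj = new JSONObject()
                    .put("changeList", new JSONArray()
                            .put("1.7.51: Example change note")
                            .put("1.7.50: Older change note"));
            // Prints only the 1.7.51 entry: iteration stops at the 1.7.50 line
            System.out.println(getChangeList(rj));
        }
    }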
@@ -61,15 +75,10 @@ public class UpdateUtils {
         }
         String jsonString = doc.body().html().replaceAll("&quot;", "\"");
         ripmeJson = new JSONObject(jsonString);
-        JSONArray jsonChangeList = ripmeJson.getJSONArray("changeList");
-        StringBuilder changeList = new StringBuilder();
-        for (int i = 0; i < jsonChangeList.length(); i++) {
-            String change = jsonChangeList.getString(i);
-            if (change.startsWith(UpdateUtils.getThisJarVersion() + ":")) {
-                break;
-            }
-            changeList.append("<br> + ").append(change);
-        }
+        String changeList = getChangeList(ripmeJson);
+
+        logger.info("Change log: \n" + changeList);

         String latestVersion = ripmeJson.getString("latestVersion");
         if (UpdateUtils.isNewerVersion(latestVersion)) {
@@ -111,15 +120,8 @@ public class UpdateUtils {
         }
         String jsonString = doc.body().html().replaceAll("&quot;", "\"");
         ripmeJson = new JSONObject(jsonString);
-        JSONArray jsonChangeList = ripmeJson.getJSONArray("changeList");
-        StringBuilder changeList = new StringBuilder();
-        for (int i = 0; i < jsonChangeList.length(); i++) {
-            String change = jsonChangeList.getString(i);
-            if (change.startsWith(UpdateUtils.getThisJarVersion() + ":")) {
-                break;
-            }
-            changeList.append("<br> + ").append(change);
-        }
+        String changeList = getChangeList(ripmeJson);

         String latestVersion = ripmeJson.getString("latestVersion");
         if (UpdateUtils.isNewerVersion(latestVersion)) {
@@ -127,7 +129,7 @@ public class UpdateUtils {
             int result = JOptionPane.showConfirmDialog(
                     null,
                     "<html><font color=\"green\">New version (" + latestVersion + ") is available!</font>"
-                            + "<br><br>Recent changes:" + changeList.toString()
+                            + "<br><br>Recent changes:" + changeList
                             + "<br><br>Do you want to download and run the newest version?</html>",
                     "RipMe Updater",
                     JOptionPane.YES_NO_OPTION);
@@ -157,6 +159,11 @@ public class UpdateUtils {
     }

     private static boolean isNewerVersion(String latestVersion) {
+        // If we're testing the update utils we want the program to always try to update
+        if (Utils.getConfigBoolean("testing.always_try_to_update", false)) {
+            logger.info("isNewerVersion is returning true because the key \"testing.always_try_to_update\" is true");
+            return true;
+        }
         int[] oldVersions = versionStringToInt(getThisJarVersion());
         int[] newVersions = versionStringToInt(latestVersion);
         if (oldVersions.length < newVersions.length) {
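isNewerVersion compares dotted version strings segment by segment via versionStringToInt, whose body is not part of this diff. A plausible self-contained sketch of that comparison; the parsing details are an assumption, and RipMe's actual helper may differ (for example, in how it handles non-numeric suffixes):

    import java.util.Arrays;

    public class VersionCompareDemo {
        // Assumed behavior: "1.7.51" -> [1, 7, 51]; the real versionStringToInt may differ
        private static int[] versionStringToInt(String version) {
            return Arrays.stream(version.split("\\."))
                    .mapToInt(Integer::parseInt)
                    .toArray();
        }

        private static boolean isNewerVersion(String current, String latest) {
            int[] oldVersions = versionStringToInt(current);
            int[] newVersions = versionStringToInt(latest);
            int len = Math.min(oldVersions.length, newVersions.length);
            for (int i = 0; i < len; i++) {
                if (newVersions[i] > oldVersions[i]) {
                    return true;  // e.g. 1.7.51 is newer than 1.7.50
                }
                if (newVersions[i] < oldVersions[i]) {
                    return false;
                }
            }
            // Equal prefix: the longer version string wins (1.7.50.1 > 1.7.50)
            return newVersions.length > oldVersions.length;
        }

        public static void main(String[] args) {
            System.out.println(isNewerVersion("1.7.50", "1.7.51")); // true
            System.out.println(isNewerVersion("1.7.51", "1.7.51")); // false
        }
    }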
@@ -227,6 +234,8 @@ public class UpdateUtils {
         try (FileOutputStream out = new FileOutputStream(updateFileName)) {
             out.write(response.bodyAsBytes());
         }
+        // Only check the hash if the user hasn't disabled hash checking
+        if (Utils.getConfigBoolean("security.check_update_hash", true)) {
         String updateHash = createSha256(new File(updateFileName));
         logger.info("Download of new version complete; saved to " + updateFileName);
         logger.info("Checking hash of update");
@@ -239,6 +248,7 @@ public class UpdateUtils {
         } else {
             logger.info("Hash is good");
         }
+        }
         if (shouldLaunch) {
             // Setup updater script
             final String batchFile, script;
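The hash check above compares the downloaded jar's digest, produced by createSha256, against the expected value; createSha256's implementation is outside this diff. A minimal sketch of what such a helper typically looks like; the streaming-digest approach here is an assumption about, not a copy of, RipMe's helper:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class Sha256Demo {
        static String createSha256(File file) throws IOException, NoSuchAlgorithmException {
            MessageDigest digest = MessageDigest.getInstance("SHA-256");
            try (InputStream in = new FileInputStream(file)) {
                byte[] buffer = new byte[8192];
                int read;
                // Stream the file through the digest so large jars don't need to fit in memory
                while ((read = in.read(buffer)) != -1) {
                    digest.update(buffer, 0, read);
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : digest.digest()) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();
        }

        public static void main(String[] args) throws Exception {
            // Path is illustrative; point it at any local file
            System.out.println(createSha256(new File("ripme.jar")));
        }
    }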