commit 1c902751d3

.github/ISSUE_TEMPLATE.md: 2 changes (vendored)

@@ -1,6 +1,8 @@
* Ripme version:
* Java version: <!-- (output of `java -version`) -->
* Operating system: <!-- (if Windows, output of `ver` or `winver`) -->
+<!-- Please do not link to content featuring underage characters even if the characters are drawn.
+These works are still illegal in many places including much of America -->
* Exact URL you were trying to rip when the problem occurred:
* Please include any additional information about how to reproduce the problem:

README.md

@@ -5,7 +5,7 @@
# Contribute

-RipMe is maintained with ♥️ and in our limited free time by **[@MetaPrime](https://github.com/metaprime)** and **[@cyian-1756](https://github.com/cyian-1756)**. If you'd like to contribute but aren't good with code, help keep us happy with a small contribution!
+RipMe is maintained with ♥️ and in our limited free time by **[@MetaPrime](https://github.com/metaprime)**, **[@cyian-1756](https://github.com/cyian-1756)** and **[@kevin51jiang](https://github.com/kevin51jiang)**. If you'd like to contribute but aren't good with code, help keep us happy with a small contribution!

-[![Tip with PayPal](https://img.shields.io/badge/PayPal-Buy_us...-lightgrey.svg)](https://www.paypal.me/ripmeapp)
+[![Tip with PayPal](https://img.shields.io/badge/coffee-%245-green.svg)](https://www.paypal.com/paypalme/ripmeapp/send?amount=5.00&currencyCode=USD&locale.x=en_US&country.x=US)

@@ -24,7 +24,7 @@ RipMe is an album ripper for various websites. Runs on your computer. Requires J
Download `ripme.jar` from the [latest release](https://github.com/ripmeapp/ripme/releases).

-**Note: If you're currently using version 1.2.x or 1.3.x, you will not automatically get updates to the newest versions. We recommend downloading the latest version from the link above.**
+**Note: If you're currently using version 1.2.x, 1.3.x or 1.7.49, you will not automatically get updates to the newest versions. We recommend downloading the latest version from the link above.**

For information about running the `.jar` file, see [the How To Run wiki](https://github.com/ripmeapp/ripme/wiki/How-To-Run-RipMe).

@@ -37,6 +37,7 @@ For information about running the `.jar` file, see [the How To Run wiki](https:/
* Built in updater
* Can rip images from tumblr in the size they were uploaded in [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#tumblrget_raw_image)
* Skips already downloaded images by default
+* Can auto skip e-hentai and nhentai albums containing certain tags [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#nhentaiblacklisttags)

## [List of Supported Sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites)

@@ -1 +1,2 @@
mvn clean compile assembly:single
+mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar

build.sh: 2 changes

@@ -1,2 +1,4 @@
#!/usr/bin/env bash
mvn clean compile assembly:single
+# Strip the jar of any non-reproducible metadata such as timestamps
+mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar
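
The strip-jar goal removes timestamps and other non-reproducible metadata, so rebuilding the same tag should produce a byte-identical jar whose SHA-256 can be compared against the currentHash field that patch.py (below) writes into ripme.json. A minimal Java sketch of that check; the jar path follows patch.py's naming scheme and the expected value is this commit's published hash, but both are illustrative:

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.security.MessageDigest;

    public class JarHashCheck {
        public static void main(String[] args) throws Exception {
            // Jar produced by build.sh and stripped by the reproducible-build plugin
            byte[] jar = Files.readAllBytes(Paths.get("target/ripme-1.7.55-jar-with-dependencies.jar"));

            // SHA-256, the same digest patch.py stores in ripme.json as "currentHash"
            MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
            StringBuilder hex = new StringBuilder();
            for (byte b : sha256.digest(jar)) {
                hex.append(String.format("%02x", b));
            }

            String expected = "1df13a792f17e2e36a2c3a62527d0e97a3edbee14d1bdb0cd822c7d2a8ce3cf4";
            System.out.println(hex.toString().equals(expected) ? "hash matches" : "hash mismatch");
        }
    }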

patch.py: 59 changes

@@ -1,19 +1,42 @@
import json
import subprocess
+from hashlib import sha256

# This script will:
# - read current version
# - increment patch version
# - update version in a few places
# - insert new line in ripme.json with message
+# - build ripme
+# - add the hash of the latest binary to ripme.json
+# - commit all changes
message = input('message: ')

-with open('ripme.json') as dataFile:
-    ripmeJson = json.load(dataFile)
-currentVersion = ripmeJson["latestVersion"]
-
-print ('Current version ' + currentVersion)
+def get_ripme_json():
+    with open('ripme.json') as dataFile:
+        ripmeJson = json.load(dataFile)
+    return ripmeJson
+
+
+def update_hash(current_hash):
+    ripmeJson = get_ripme_json()
+    with open('ripme.json', 'w') as dataFile:
+        ripmeJson["currentHash"] = current_hash
+        print(ripmeJson["currentHash"])
+        json.dump(ripmeJson, dataFile, indent=4)
+
+
+def update_change_list(message):
+    ripmeJson = get_ripme_json()
+    with open('ripme.json', 'w') as dataFile:
+        ripmeJson["changeList"].insert(0, message)
+        json.dump(ripmeJson, dataFile, indent=4)
+
+
+currentVersion = get_ripme_json()["latestVersion"]
+
+print('Current version ' + currentVersion)

versionFields = currentVersion.split('.')
patchCur = int(versionFields[2])

@@ -22,14 +45,14 @@ majorMinor = versionFields[:2]
majorMinor.append(str(patchNext))
nextVersion = '.'.join(majorMinor)

-print ('Updating to ' + nextVersion)
+print('Updating to ' + nextVersion)

substrExpr = 's/' + currentVersion + '/' + nextVersion + '/'
subprocess.call(['sed', '-i', '-e', substrExpr, 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java'])
subprocess.call(['git', 'grep', 'DEFAULT_VERSION.*' + nextVersion,
                 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java'])

-substrExpr = 's/\\"latestVersion\\": \\"' + currentVersion + '\\"/\\"latestVersion\\": \\"' +\
+substrExpr = 's/\\"latestVersion\\": \\"' + currentVersion + '\\"/\\"latestVersion\\": \\"' + \
    nextVersion + '\\"/'
subprocess.call(['sed', '-i', '-e', substrExpr, 'ripme.json'])
subprocess.call(['git', 'grep', 'latestVersion', 'ripme.json'])

@@ -39,18 +62,22 @@ subprocess.call(['sed', '-i', '-e', substrExpr, 'pom.xml'])
subprocess.call(['git', 'grep', '<version>' + nextVersion + '</version>', 'pom.xml'])

commitMessage = nextVersion + ': ' + message
-changeLogLine = ' \"' + commitMessage + '\",\n'
-
-dataFile = open("ripme.json", "r")
-ripmeJsonLines = dataFile.readlines()
-ripmeJsonLines.insert(3, changeLogLine)
-outputContent = ''.join(ripmeJsonLines)
-dataFile.close()
+update_change_list(commitMessage)
-
-dataFile = open("ripme.json", "w")
-dataFile.write(outputContent)
-dataFile.close()

print("Building ripme")
subprocess.call(["mvn", "clean", "compile", "assembly:single"])
+print("Stripping jar")
+subprocess.call(["mvn", "io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar"])
+print("Hashing .jar file")
+openedFile = open("./target/ripme-{}-jar-with-dependencies.jar".format(nextVersion), "rb")
+readFile = openedFile.read()
+file_hash = sha256(readFile).hexdigest()
+print("Hash is: {}".format(file_hash))
+print("Updating hash")
+update_hash(file_hash)
subprocess.call(['git', 'add', '-u'])
subprocess.call(['git', 'commit', '-m', commitMessage])
subprocess.call(['git', 'tag', nextVersion])
print("Remember to run `git push origin master` before release.py")

pom.xml: 7 changes

@@ -4,7 +4,7 @@
    <groupId>com.rarchives.ripme</groupId>
    <artifactId>ripme</artifactId>
    <packaging>jar</packaging>
-   <version>1.7.47</version>
+   <version>1.7.55</version>
    <name>ripme</name>
    <url>http://rip.rarchives.com</url>
    <properties>

@@ -61,6 +61,11 @@
    </dependencies>
    <build>
        <plugins>
+           <plugin>
+               <groupId>io.github.zlika</groupId>
+               <artifactId>reproducible-build-maven-plugin</artifactId>
+               <version>0.6</version>
+           </plugin>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>

release.py: 117 changes (new executable file)

@@ -0,0 +1,117 @@
#!/usr/bin/env python3

import re

import os

import sys
from hashlib import sha256
from github import Github
import json
import argparse

parser = argparse.ArgumentParser(description="Make a new ripme release on github")
parser.add_argument("-f", "--file", help="Path to the version of ripme to release")
parser.add_argument("-t", "--token", help="Your github personal access token")
parser.add_argument("-d", "--debug", help="Run in debug mode", action="store_true")
parser.add_argument("-n", "--non-interactive", help="Do not ask for any input from the user", action="store_true")
parser.add_argument("--test", help="Perform a dry run (Do everything but upload new release)", action="store_true")
parser.add_argument("--skip-hash-check", help="Skip hash check (This should only be used for testing)", action="store_true")
args = parser.parse_args()

try:
    # This binds input to raw_input on python2, we do this because input acts like eval on python2
    input = raw_input
except NameError:
    pass


# Make sure the file the user selected is a jar
def isJar(filename):
    if debug:
        print("Checking if {} is a jar file".format(filename))
    return filename.endswith("jar")


# Returns true if last entry to the "changeList" section of ripme.json is in the format of $number.$number.$number: and
# false if not
def isValidCommitMessage(message):
    if debug:
        print(r"Checking if {} matches pattern ^\d+\.\d+\.\d+:".format(message))
    pattern = re.compile(r"^\d+\.\d+\.\d+:")
    return re.match(pattern, message)


# Checks if the update has the name ripme.jar, if not it renames the file
def checkAndRenameFile(path):
    """Check if path (a string) points to a ripme.jar. Returns the possibly renamed file path"""
    if not path.endswith("ripme.jar"):
        print("Specified file is not named ripme.jar, renaming")
        new_path = os.path.join(os.path.dirname(path), "ripme.jar")
        os.rename(path, new_path)
        return new_path
    return path


ripmeJson = json.loads(open("ripme.json").read())
fileToUploadPath = checkAndRenameFile(args.file)
InNoninteractiveMode = args.non_interactive
commitMessage = ripmeJson.get("changeList")[0]
releaseVersion = ripmeJson.get("latestVersion")
debug = args.debug
accessToken = args.token
repoOwner = "ripmeapp"
repoName = "ripme"

if not os.path.isfile(fileToUploadPath):
    print("[!] Error: {} does not exist".format(fileToUploadPath))
    sys.exit(1)

if not isJar(fileToUploadPath):
    print("[!] Error: {} is not a jar file!".format(fileToUploadPath))
    sys.exit(1)

if not isValidCommitMessage(commitMessage):
    print("[!] Error: {} is not a valid commit message as it does not start with a version".format(fileToUploadPath))
    sys.exit(1)


if not args.skip_hash_check:
    if debug:
        print("Reading file {}".format(fileToUploadPath))
    ripmeUpdate = open(fileToUploadPath, mode='rb').read()

    # The actual hash of the file on disk
    actualHash = sha256(ripmeUpdate).hexdigest()

    # The hash that we expect the update to have
    expectedHash = ripmeJson.get("currentHash")

    # Make sure that the hash of the file we're uploading matches the hash in ripme.json. These hashes not matching will
    # cause ripme to refuse to install the update for all users who haven't disabled update hash checking
    if expectedHash != actualHash:
        print("[!] Error: expected hash of file and actual hash differ")
        print("[!] Expected hash is {}".format(expectedHash))
        print("[!] Actual hash is {}".format(actualHash))
        sys.exit(1)
else:
    print("[*] WARNING: SKIPPING HASH CHECK")

# Ask the user to review the information before we precede
# This only runs in we're in interactive mode
if not InNoninteractiveMode:
    print("File path: {}".format(fileToUploadPath))
    print("Release title: {}".format(commitMessage))
    print("Repo: {}/{}".format(repoOwner, repoName))
    input("\nPlease review the information above and ensure it is correct and then press enter")

if not args.test:
    print("Accessing github using token")
    g = Github(accessToken)

    print("Creating release")
    release = g.get_user(repoOwner).get_repo(repoName).create_git_release(releaseVersion, commitMessage, "")

    print("Uploading file")
    release.upload_asset(fileToUploadPath, "ripme.jar")
else:
    print("Not uploading release being script was run with --test flag")

ripme.json: 13 changes

@@ -1,6 +1,14 @@
{
-   "latestVersion": "1.7.47",
+   "latestVersion": "1.7.55",
    "changeList": [
+       "1.7.55: Fixed instagram ripper; Reddit ripper now respects history.end_rip_after_already_seen; Improvements to patch.py and release.py",
+       "1.7.54: Fixed twitter ripper video downloading; fixed instagram ripper",
+       "1.7.53: Added Picstatio ripper; Fixed instagram ripper; Reddit ripper now gets videos from v.redd.it; Fixed ZikiRipper getAlbumTitle; fixed twitter ripper",
+       "1.7.52: Added warning about using furaffinty shared account; Refactoring in Utils class; XhamsterRipper now accepts all countries subdomains; E621 ripper now accepts urls with order:Score at the end; release.py imrpovements; DeviantartRipper now logs in using cookies; patch.py imrpovements",
+       "1.7.51: Fixed instagram ripper; Added the ability to rip from vsco profiles; Fixed TheyiffgalleryRipper; Can now update ripme using the -j flag; added script to automate releases; Code style fixes",
+       "1.7.50: Ripme now checks file hash before running update; fixed update bug which cased ripme to report every update as new",
+       "1.7.49: Fixed -n flag; Added ability to change locale at runtime and from gui; Update kr_KR translation; Removed support for tnbtu.com; No longer writes url to url_history file is save urls only is checked",
+       "1.7.48: Fixed instagram ripper; Added Korean translation; Added quickQueue support to nhentairipper; Rewrote nhentairipper to be faster; myhentaicomics ripper now requests proper url when downloading images; Can now include comments in url files; Added the ability to blacklist tags on e-hentai.org",
        "1.7.47: Added quick queue support for hentai2read ripper; Fixed instagram ripper; SankakuComplexRipper can now download from different subdomains; Added ripper for bato.to; Added quick queue support for 8muses.download; ",
        "1.7.46: Fixed hentai2read ripper; Rewrote the myhentaicomics ripper to use the new getAlbumsToQueue func; Can now blacklist nhentai tags; SinnercomicsRipper no longer adds -page-01 to folder names; EightmusesRipper now adds file extension to filename; disbaled test for twitch ripper",
        "1.7.45: Fixed hentai2read ripper; ImageBam album fixed; Added various translations; TsuminoRipper no longer requires album name to download",

@@ -218,5 +226,6 @@
        "1.0.4: Fixed spaces-in-directory bug",
        "1.0.3: Added VK.com ripper",
        "1.0.1: Added auto-update functionality"
-   ]
+   ],
+   "currentHash": "1df13a792f17e2e36a2c3a62527d0e97a3edbee14d1bdb0cd822c7d2a8ce3cf4"
}
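
ripme.json doubles as the update feed: latestVersion is compared against the client's DEFAULT_VERSION, changeList[0] becomes the release title in release.py, and currentHash is the SHA-256 the updater verifies before installing a downloaded jar. A minimal sketch of reading those fields with org.json, which is already a ripme dependency; the trimmed-down literal and class are illustrative, not the project's actual UpdateUtils code:

    import org.json.JSONObject;

    public class UpdateFeed {
        public static void main(String[] args) {
            // Trimmed-down ripme.json (illustrative literal)
            String feed = "{\"latestVersion\": \"1.7.55\","
                    + " \"changeList\": [\"1.7.55: Fixed instagram ripper\"],"
                    + " \"currentHash\": \"1df13a792f17e2e36a2c3a62527d0e97a3edbee14d1bdb0cd822c7d2a8ce3cf4\"}";

            JSONObject json = new JSONObject(feed);
            String latest = json.getString("latestVersion");              // compared against the running version
            String title = json.getJSONArray("changeList").getString(0);  // release title for the GitHub release
            String expectedHash = json.getString("currentHash");          // SHA-256 checked before installing

            System.out.println(latest + " | " + title + " | " + expectedHash);
        }
    }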

src/main/java/com/rarchives/ripme/App.java

@@ -217,12 +217,16 @@ public class App {
        //Read URLs from File
        if (cl.hasOption('f')) {
            String filename = cl.getOptionValue('f');
-           try {
+           try (BufferedReader br = new BufferedReader(new FileReader(filename))) {
                String url;
-               BufferedReader br = new BufferedReader(new FileReader(filename));
                while ((url = br.readLine()) != null) {
-                   // loop through each url in the file and proces each url individually.
-                   ripURL(url.trim(), cl.hasOption("n"));
+                   if (url.startsWith("//") || url.startsWith("#")) {
+                       logger.debug("Skipping over line \"" + url + "\"because it is a comment");
+                   } else {
+                       // loop through each url in the file and process each url individually.
+                       ripURL(url.trim(), !cl.hasOption("n"));
+                   }
                }
            } catch (FileNotFoundException fne) {
                logger.error("[!] File containing list of URLs not found. Cannot continue.");

@@ -234,7 +238,11 @@ public class App {
        //The URL to rip.
        if (cl.hasOption('u')) {
            String url = cl.getOptionValue('u').trim();
-           ripURL(url, cl.hasOption("n"));
+           ripURL(url, !cl.hasOption("n"));
        }

+       if (cl.hasOption('j')) {
+           UpdateUtils.updateProgramCLI();
+       }

    }

@@ -286,6 +294,7 @@ public class App {
        opts.addOption("v", "version", false, "Show current version");
        opts.addOption("s", "socks-server", true, "Use socks server ([user:password]@host[:port])");
        opts.addOption("p", "proxy-server", true, "Use HTTP Proxy server ([user:password]@host[:port])");
+       opts.addOption("j", "update", false, "Update ripme");
        return opts;
    }

@@ -326,7 +335,7 @@ public class App {
        } else {
            logger.info("Loading history from configuration");
            HISTORY.fromList(Utils.getConfigList("download.history"));
-           if (HISTORY.toList().size() == 0) {
+           if (HISTORY.toList().isEmpty()) {
                // Loaded from config, still no entries.
                // Guess rip history based on rip folder
                String[] dirs = Utils.getWorkingDirectory().list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory());

src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java

@@ -81,7 +81,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
    public void rip() throws IOException {
        int index = 0;
        int textindex = 0;
-       logger.info("Retrieving " + this.url);
+       LOGGER.info("Retrieving " + this.url);
        sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
        Document doc = getFirstPage();

@@ -97,7 +97,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {

        while (doc != null) {
            if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
-               sendUpdate(STATUS.DOWNLOAD_COMPLETE, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
+               sendUpdate(STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
                break;
            }
            List<String> imageURLs = getURLsFromPage(doc);

@@ -111,13 +111,13 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
            }
        }

-       if (imageURLs.size() == 0) {
+       if (imageURLs.isEmpty()) {
            throw new IOException("No images found at " + doc.location());
        }

        for (String imageURL : imageURLs) {
            index += 1;
-           logger.debug("Found image url #" + index + ": " + imageURL);
+           LOGGER.debug("Found image url #" + index + ": " + imageURL);
            downloadURL(new URL(imageURL), index);
            if (isStopped()) {
                break;

@@ -125,16 +125,16 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
            }
        }
        if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) {
-           logger.debug("Fetching description(s) from " + doc.location());
+           LOGGER.debug("Fetching description(s) from " + doc.location());
            List<String> textURLs = getDescriptionsFromPage(doc);
-           if (textURLs.size() > 0) {
-               logger.debug("Found description link(s) from " + doc.location());
+           if (!textURLs.isEmpty()) {
+               LOGGER.debug("Found description link(s) from " + doc.location());
                for (String textURL : textURLs) {
                    if (isStopped()) {
                        break;
                    }
                    textindex += 1;
-                   logger.debug("Getting description from " + textURL);
+                   LOGGER.debug("Getting description from " + textURL);
                    String[] tempDesc = getDescription(textURL,doc);
                    if (tempDesc != null) {
                        if (Utils.getConfigBoolean("file.overwrite", false) || !(new File(

@@ -144,11 +144,11 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                                + getPrefix(index)
                                + (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL)))
                                + ".txt").exists())) {
-                           logger.debug("Got description from " + textURL);
+                           LOGGER.debug("Got description from " + textURL);
                            saveText(new URL(textURL), "", tempDesc[0], textindex, (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL))));
                            sleep(descSleepTime());
                        } else {
-                           logger.debug("Description from " + textURL + " already exists.");
+                           LOGGER.debug("Description from " + textURL + " already exists.");
                        }
                    }

@@ -164,14 +164,14 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
                sendUpdate(STATUS.LOADING_RESOURCE, "next page");
                doc = getNextPage(doc);
            } catch (IOException e) {
-               logger.info("Can't get next page: " + e.getMessage());
+               LOGGER.info("Can't get next page: " + e.getMessage());
                break;
            }
        }

        // If they're using a thread pool, wait for it.
        if (getThreadPool() != null) {
-           logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+           LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
            getThreadPool().waitForThreads();
        }
        waitForThreads();

@@ -237,12 +237,12 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
            out.write(text.getBytes());
            out.close();
        } catch (IOException e) {
-           logger.error("[!] Error creating save file path for description '" + url + "':", e);
+           LOGGER.error("[!] Error creating save file path for description '" + url + "':", e);
            return false;
        }
-       logger.debug("Downloading " + url + "'s description to " + saveFileAs);
+       LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs);
        if (!saveFileAs.getParentFile().exists()) {
-           logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+           LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
            saveFileAs.getParentFile().mkdirs();
        }
        return true;

src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java

@@ -50,12 +50,18 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
    @Override
    public void rip() throws IOException {
        int index = 0;
-       logger.info("Retrieving " + this.url);
+       LOGGER.info("Retrieving " + this.url);
        sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
        JSONObject json = getFirstPage();

        while (json != null) {
            List<String> imageURLs = getURLsFromJSON(json);

+           if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
+               sendUpdate(STATUS.DOWNLOAD_COMPLETE, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
+               break;
+           }
+
            // Remove all but 1 image
            if (isThisATest()) {
                while (imageURLs.size() > 1) {

@@ -63,7 +69,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
            }
        }

-       if (imageURLs.size() == 0) {
+       if (imageURLs.isEmpty() && !hasASAPRipping()) {
            throw new IOException("No images found at " + this.url);
        }

@@ -71,8 +77,9 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
            if (isStopped()) {
                break;
            }
+
            index += 1;
-           logger.debug("Found image url #" + index+ ": " + imageURL);
+           LOGGER.debug("Found image url #" + index+ ": " + imageURL);
            downloadURL(new URL(imageURL), index);
        }

@@ -84,14 +91,14 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
                sendUpdate(STATUS.LOADING_RESOURCE, "next page");
                json = getNextPage(json);
            } catch (IOException e) {
-               logger.info("Can't get next page: " + e.getMessage());
+               LOGGER.info("Can't get next page: " + e.getMessage());
                break;
            }
        }

        // If they're using a thread pool, wait for it.
        if (getThreadPool() != null) {
-           logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+           LOGGER.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
            getThreadPool().waitForThreads();
        }
        waitForThreads();

src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java

@@ -27,7 +27,7 @@ public abstract class AbstractRipper
                extends Observable
                implements RipperInterface, Runnable {

-   protected static final Logger logger = Logger.getLogger(AbstractRipper.class);
+   protected static final Logger LOGGER = Logger.getLogger(AbstractRipper.class);
    private final String URLHistoryFile = Utils.getURLHistoryFile();

    public static final String USER_AGENT =

@@ -67,14 +67,35 @@ public abstract class AbstractRipper
     * @param downloadedURL URL to check if downloaded
     */
    private void writeDownloadedURL(String downloadedURL) throws IOException {
+       // If "save urls only" is checked don't write to the url history file
+       if (Utils.getConfigBoolean("urls_only.save", false)) {
+           return;
+       }
        downloadedURL = normalizeUrl(downloadedURL);
        BufferedWriter bw = null;
        FileWriter fw = null;
        try {
            File file = new File(URLHistoryFile);
+           if (!new File(Utils.getConfigDir()).exists()) {
+               LOGGER.error("Config dir doesn't exist");
+               LOGGER.info("Making config dir");
+               boolean couldMakeDir = new File(Utils.getConfigDir()).mkdirs();
+               if (!couldMakeDir) {
+                   LOGGER.error("Couldn't make config dir");
+                   return;
+               }
+           }
            // if file doesnt exists, then create it
            if (!file.exists()) {
-               file.createNewFile();
+               boolean couldMakeDir = file.createNewFile();
+               if (!couldMakeDir) {
+                   LOGGER.error("Couldn't url history file");
+                   return;
+               }
            }
+           if (!file.canWrite()) {
+               LOGGER.error("Can't write to url history file: " + URLHistoryFile);
+               return;
+           }
            fw = new FileWriter(file.getAbsoluteFile(), true);
            bw = new BufferedWriter(fw);

@@ -112,8 +133,8 @@ public abstract class AbstractRipper
    private boolean hasDownloadedURL(String url) {
        File file = new File(URLHistoryFile);
        url = normalizeUrl(url);
-       try {
-           Scanner scanner = new Scanner(file);
+
+       try (Scanner scanner = new Scanner(file)) {
            while (scanner.hasNextLine()) {
                final String lineFromFile = scanner.nextLine();
                if (lineFromFile.equals(url)) {

@@ -123,6 +144,7 @@ public abstract class AbstractRipper
        } catch (FileNotFoundException e) {
            return false;
        }
+
        return false;
    }

@@ -225,10 +247,10 @@ public abstract class AbstractRipper
        try {
            stopCheck();
        } catch (IOException e) {
-           logger.debug("Ripper has been stopped");
+           LOGGER.debug("Ripper has been stopped");
            return false;
        }
-       logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
+       LOGGER.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
        String saveAs = getFileName(url, fileName, extension);
        File saveFileAs;
        try {

@@ -243,19 +265,19 @@ public abstract class AbstractRipper
                    + prefix
                    + saveAs);
        } catch (IOException e) {
-           logger.error("[!] Error creating save file path for URL '" + url + "':", e);
+           LOGGER.error("[!] Error creating save file path for URL '" + url + "':", e);
            return false;
        }
-       logger.debug("Downloading " + url + " to " + saveFileAs);
+       LOGGER.debug("Downloading " + url + " to " + saveFileAs);
        if (!saveFileAs.getParentFile().exists()) {
-           logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+           LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
            saveFileAs.getParentFile().mkdirs();
        }
        if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
            try {
                writeDownloadedURL(url.toExternalForm() + "\n");
            } catch (IOException e) {
-               logger.debug("Unable to write URL history file");
+               LOGGER.debug("Unable to write URL history file");
            }
        }
        return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME);

@@ -335,7 +357,7 @@ public abstract class AbstractRipper
     * Waits for downloading threads to complete.
     */
    protected void waitForThreads() {
-       logger.debug("Waiting for threads to finish");
+       LOGGER.debug("Waiting for threads to finish");
        completed = false;
        threadPool.waitForThreads();
        checkIfComplete();

@@ -387,13 +409,13 @@ public abstract class AbstractRipper
     */
    void checkIfComplete() {
        if (observer == null) {
-           logger.debug("observer is null");
+           LOGGER.debug("observer is null");
            return;
        }

        if (!completed) {
            completed = true;
-           logger.info(" Rip completed!");
+           LOGGER.info(" Rip completed!");

            RipStatusComplete rsc = new RipStatusComplete(workingDir, getCount());
            RipStatusMessage msg = new RipStatusMessage(STATUS.RIP_COMPLETE, rsc);

@@ -402,7 +424,7 @@ public abstract class AbstractRipper
            Logger rootLogger = Logger.getRootLogger();
            FileAppender fa = (FileAppender) rootLogger.getAppender("FILE");
            if (fa != null) {
-               logger.debug("Changing log file back to 'ripme.log'");
+               LOGGER.debug("Changing log file back to 'ripme.log'");
                fa.setFile("ripme.log");
                fa.activateOptions();
            }

@@ -411,7 +433,7 @@ public abstract class AbstractRipper
            try {
                Desktop.getDesktop().open(new File(urlFile));
            } catch (IOException e) {
-               logger.warn("Error while opening " + urlFile, e);
+               LOGGER.warn("Error while opening " + urlFile, e);
            }
        }
    }

@@ -466,7 +488,7 @@ public abstract class AbstractRipper
        for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers")) {
            try {
                AlbumRipper ripper = (AlbumRipper) constructor.newInstance(url); // by design: can throw ClassCastException
-               logger.debug("Found album ripper: " + ripper.getClass().getName());
+               LOGGER.debug("Found album ripper: " + ripper.getClass().getName());
                return ripper;
            } catch (Exception e) {
                // Incompatible rippers *will* throw exceptions during instantiation.

@@ -475,7 +497,7 @@ public abstract class AbstractRipper
        for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers.video")) {
            try {
                VideoRipper ripper = (VideoRipper) constructor.newInstance(url); // by design: can throw ClassCastException
-               logger.debug("Found video ripper: " + ripper.getClass().getName());
+               LOGGER.debug("Found video ripper: " + ripper.getClass().getName());
                return ripper;
            } catch (Exception e) {
                // Incompatible rippers *will* throw exceptions during instantiation.

@@ -532,11 +554,11 @@ public abstract class AbstractRipper
        try {
            rip();
        } catch (HttpStatusException e) {
-           logger.error("Got exception while running ripper:", e);
+           LOGGER.error("Got exception while running ripper:", e);
            waitForThreads();
            sendUpdate(STATUS.RIP_ERRORED, "HTTP status code " + e.getStatusCode() + " for URL " + e.getUrl());
        } catch (Exception e) {
-           logger.error("Got exception while running ripper:", e);
+           LOGGER.error("Got exception while running ripper:", e);
            waitForThreads();
            sendUpdate(STATUS.RIP_ERRORED, e.getMessage());
        } finally {

@@ -549,10 +571,10 @@ public abstract class AbstractRipper
    private void cleanup() {
        if (this.workingDir.list().length == 0) {
            // No files, delete the dir
-           logger.info("Deleting empty directory " + this.workingDir);
+           LOGGER.info("Deleting empty directory " + this.workingDir);
            boolean deleteResult = this.workingDir.delete();
            if (!deleteResult) {
-               logger.error("Unable to delete empty directory " + this.workingDir);
+               LOGGER.error("Unable to delete empty directory " + this.workingDir);
            }
        }
    }

@@ -567,11 +589,11 @@ public abstract class AbstractRipper
     */
    protected boolean sleep(int milliseconds) {
        try {
-           logger.debug("Sleeping " + milliseconds + "ms");
+           LOGGER.debug("Sleeping " + milliseconds + "ms");
            Thread.sleep(milliseconds);
            return true;
        } catch (InterruptedException e) {
-           logger.error("Interrupted while waiting to load next page", e);
+           LOGGER.error("Interrupted while waiting to load next page", e);
            return false;
        }
    }

@@ -585,7 +607,7 @@ public abstract class AbstractRipper

    /** Methods for detecting when we're running a test. */
    public void markAsTest() {
-       logger.debug("THIS IS A TEST RIP");
+       LOGGER.debug("THIS IS A TEST RIP");
        thisIsATest = true;
    }
    protected boolean isThisATest() {

src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java

@@ -62,22 +62,21 @@ public abstract class AlbumRipper extends AbstractRipper {
                || itemsCompleted.containsKey(url)
                || itemsErrored.containsKey(url) )) {
            // Item is already downloaded/downloading, skip it.
-           logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
+           LOGGER.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
            return false;
        }
        if (Utils.getConfigBoolean("urls_only.save", false)) {
            // Output URL to file
            String urlFile = this.workingDir + File.separator + "urls.txt";
-           try {
-               FileWriter fw = new FileWriter(urlFile, true);
+           try (FileWriter fw = new FileWriter(urlFile, true)) {
                fw.write(url.toExternalForm());
                fw.write("\n");
-               fw.close();

                RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, urlFile);
                itemsCompleted.put(url, new File(urlFile));
                observer.update(this, msg);
            } catch (IOException e) {
-               logger.error("Error while writing to " + urlFile, e);
+               LOGGER.error("Error while writing to " + urlFile, e);
            }
        }
        else {

@@ -129,7 +128,7 @@ public abstract class AlbumRipper extends AbstractRipper {

            checkIfComplete();
        } catch (Exception e) {
-           logger.error("Exception while updating observer: ", e);
+           LOGGER.error("Exception while updating observer: ", e);
        }
    }

@@ -197,7 +196,7 @@ public abstract class AlbumRipper extends AbstractRipper {
        } else {
            title = super.getAlbumTitle(this.url);
        }
-       logger.debug("Using album title '" + title + "'");
+       LOGGER.debug("Using album title '" + title + "'");

        title = Utils.filesystemSafe(title);
        path += title;

@@ -205,10 +204,10 @@ public abstract class AlbumRipper extends AbstractRipper {

        this.workingDir = new File(path);
        if (!this.workingDir.exists()) {
-           logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
+           LOGGER.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
            this.workingDir.mkdirs();
        }
-       logger.debug("Set working directory to: " + this.workingDir);
+       LOGGER.debug("Set working directory to: " + this.workingDir);
    }

    /**

src/main/java/com/rarchives/ripme/ripper/VideoRipper.java

@@ -1,5 +1,9 @@
package com.rarchives.ripme.ripper;

+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

@@ -7,29 +11,27 @@ import java.net.MalformedURLException;
import java.net.URL;
import java.util.Map;

-import com.rarchives.ripme.ui.RipStatusMessage;
-import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
-import com.rarchives.ripme.utils.Utils;
-import com.sun.org.apache.xpath.internal.operations.Bool;

public abstract class VideoRipper extends AbstractRipper {

-   private int bytesTotal = 1,
-       bytesCompleted = 1;
+   private int bytesTotal = 1;
+   private int bytesCompleted = 1;

    protected VideoRipper(URL url) throws IOException {
        super(url);
    }

    public abstract boolean canRip(URL url);
    public abstract void rip() throws IOException;

    public abstract String getHost();

    public abstract String getGID(URL url) throws MalformedURLException;

    @Override
    public void setBytesTotal(int bytes) {
        this.bytesTotal = bytes;
    }

    @Override
    public void setBytesCompleted(int bytes) {
        this.bytesCompleted = bytes;

@@ -45,23 +47,22 @@ public abstract class VideoRipper extends AbstractRipper {
        if (Utils.getConfigBoolean("urls_only.save", false)) {
            // Output URL to file
            String urlFile = this.workingDir + File.separator + "urls.txt";
-           try {
-               FileWriter fw = new FileWriter(urlFile, true);
+
+           try (FileWriter fw = new FileWriter(urlFile, true)) {
                fw.write(url.toExternalForm());
                fw.write("\n");
-               fw.close();
+
                RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, urlFile);
                observer.update(this, msg);
            } catch (IOException e) {
-               logger.error("Error while writing to " + urlFile, e);
+               LOGGER.error("Error while writing to " + urlFile, e);
                return false;
            }
-       }
-       else {
+       } else {
            if (isThisATest()) {
                // Tests shouldn't download the whole video
                // Just change this.url to the download URL so the test knows we found it.
-               logger.debug("Test rip, found URL: " + url);
+               LOGGER.debug("Test rip, found URL: " + url);
                this.url = url;
                return true;
            }

@@ -71,34 +72,36 @@ public abstract class VideoRipper extends AbstractRipper {
    }

    @Override
-   public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
+   public boolean addURLToDownload(URL url, File saveAs, String referrer, Map<String, String> cookies, Boolean getFileExtFromMIME) {
        return addURLToDownload(url, saveAs);
    }


    /**
     * Creates & sets working directory based on URL.
-    * @param url
-    *      Target URL
+    *
+    * @param url Target URL
     */
    @Override
    public void setWorkingDir(URL url) throws IOException {
        String path = Utils.getWorkingDirectory().getCanonicalPath();
+
        if (!path.endsWith(File.separator)) {
            path += File.separator;
        }
+
        path += "videos" + File.separator;
-       this.workingDir = new File(path);
-       if (!this.workingDir.exists()) {
-           logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir));
-           this.workingDir.mkdirs();
+       workingDir = new File(path);
+
+       if (!workingDir.exists()) {
+           LOGGER.info("[+] Creating directory: " + Utils.removeCWD(workingDir));
+           workingDir.mkdirs();
        }
-       logger.debug("Set working directory to: " + this.workingDir);
+
+       LOGGER.debug("Set working directory to: " + workingDir);
    }

    /**
-    * @return
-    *      Returns % of video done downloading.
+    * @return Returns % of video done downloading.
     */
    @Override
    public int getCompletionPercentage() {

@@ -107,16 +110,16 @@ public abstract class VideoRipper extends AbstractRipper {

    /**
     * Runs if download successfully completed.
-    * @param url
-    *      Target URL
-    * @param saveAs
-    *      Path to file, including filename.
+    *
+    * @param url Target URL
+    * @param saveAs Path to file, including filename.
     */
    @Override
    public void downloadCompleted(URL url, File saveAs) {
        if (observer == null) {
            return;
        }
+
        try {
            String path = Utils.removeCWD(saveAs);
            RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, path);

@@ -124,65 +127,61 @@ public abstract class VideoRipper extends AbstractRipper {

            checkIfComplete();
        } catch (Exception e) {
-           logger.error("Exception while updating observer: ", e);
+           LOGGER.error("Exception while updating observer: ", e);
        }
    }

    /**
     * Runs if the download errored somewhere.
-    * @param url
-    *      Target URL
-    * @param reason
-    *      Reason why the download failed.
+    *
+    * @param url Target URL
+    * @param reason Reason why the download failed.
     */
    @Override
    public void downloadErrored(URL url, String reason) {
        if (observer == null) {
            return;
        }
+
        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_ERRORED, url + " : " + reason));
        checkIfComplete();
    }

-
    /**
     * Runs if user tries to redownload an already existing File.
-    * @param url
-    *      Target URL
-    * @param file
-    *      Existing file
+    *
+    * @param url Target URL
+    * @param file Existing file
     */
    @Override
    public void downloadExists(URL url, File file) {
        if (observer == null) {
            return;
        }
+
        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file));
        checkIfComplete();
    }

    /**
     * Gets the status and changes it to a human-readable form.
-    * @return
-    *      Status of current download.
+    *
+    * @return Status of current download.
     */
    @Override
    public String getStatusText() {
-       StringBuilder sb = new StringBuilder();
-       sb.append(getCompletionPercentage())
-           .append("% ")
-           .append(" - ")
-           .append(Utils.bytesToHumanReadable(bytesCompleted))
-           .append(" / ")
-           .append(Utils.bytesToHumanReadable(bytesTotal));
-       return sb.toString();
+       return String.valueOf(getCompletionPercentage()) +
+               "% - " +
+               Utils.bytesToHumanReadable(bytesCompleted) +
+               " / " +
+               Utils.bytesToHumanReadable(bytesTotal);
    }

-   @Override
    /**
     * Sanitizes URL.
     * Usually just returns itself.
     */
+   @Override
    public URL sanitizeURL(URL url) throws MalformedURLException {
        return url;
    }

@@ -195,8 +194,10 @@ public abstract class VideoRipper extends AbstractRipper {
        if (observer == null) {
            return;
        }
+
        if (bytesCompleted >= bytesTotal) {
            super.checkIfComplete();
        }
    }
+
}

src/main/java/com/rarchives/ripme/ripper/rippers/AerisdiesRipper.java

@@ -9,7 +9,6 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

-import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

@@ -20,7 +19,6 @@ import java.util.HashMap;

public class AerisdiesRipper extends AbstractHTMLRipper {

-   private Document albumDoc = null;
    private Map<String,String> cookies = new HashMap<>();


@@ -41,33 +39,32 @@ public class AerisdiesRipper extends AbstractHTMLRipper {
    public String getGID(URL url) throws MalformedURLException {
        Pattern p = Pattern.compile("^https?://www.aerisdies.com/html/lb/[a-z]*_(\\d+)_\\d\\.html");
        Matcher m = p.matcher(url.toExternalForm());
-       if (!m.matches()) {
-           throw new MalformedURLException("Expected URL format: http://www.aerisdies.com/html/lb/albumDIG, got: " + url);
+       if (m.matches()) {
+           return m.group(1);
        }
-       return m.group(1);
+       throw new MalformedURLException("Expected URL format: http://www.aerisdies.com/html/lb/albumDIG, got: " + url);

    }

    @Override
    public String getAlbumTitle(URL url) throws MalformedURLException {
        try {
            // Attempt to use album title as GID
-           String title = getFirstPage().select("div > div > span[id=albumname] > a").first().text();
+           Element el = getFirstPage().select(".headtext").first();
+           if (el == null) {
+               throw new IOException("Unable to get album title");
+           }
+           String title = el.text();
            return getHost() + "_" + getGID(url) + "_" + title.trim();
        } catch (IOException e) {
            // Fall back to default album naming convention
-           logger.info("Unable to find title at " + url);
+           LOGGER.info("Unable to find title at " + url);
        }
        return super.getAlbumTitle(url);
    }

    @Override
    public Document getFirstPage() throws IOException {
-       if (albumDoc == null) {
-           Response resp = Http.url(url).response();
-           cookies.putAll(resp.cookies());
-           albumDoc = resp.parse();
-       }
-       return albumDoc;
+       return Http.url(url).get();
    }

    @Override

src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java: new file

@@ -0,0 +1,258 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.utils.Http;

import org.json.JSONObject;

public class ArtStationRipper extends AbstractJSONRipper {
    enum URL_TYPE {
        SINGLE_PROJECT, USER_PORTFOLIO, UNKNOWN
    }

    private ParsedURL albumURL;
    private String projectName;
    private Integer projectIndex;

    public ArtStationRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    protected String getDomain() {
        return "artstation.com";
    }

    @Override
    public String getHost() {
        return "ArtStation";
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        JSONObject groupData;

        // Parse URL and store for later use
        albumURL = parseURL(url);

        if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) {
            // URL points to single project, use project title as GID
            try {
                groupData = Http.url(albumURL.getLocation()).getJSON();
            } catch (IOException e) {
                throw new MalformedURLException("Couldn't load JSON from " + albumURL.getLocation());
            }
            return groupData.getString("title");
        }

        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // URL points to user portfolio, use user's full name as GID
            String userInfoURL = "https://www.artstation.com/users/" + albumURL.getID() + "/quick.json";
            try {
                groupData = Http.url(userInfoURL).getJSON();
            } catch (IOException e) {
                throw new MalformedURLException("Couldn't load JSON from " + userInfoURL);
            }
            return groupData.getString("full_name");
        }

        // No JSON found in the URL entered, can't rip
        throw new MalformedURLException(
                "Expected URL to an ArtStation project or user profile - got " + url + " instead");
    }

    @Override
    protected JSONObject getFirstPage() throws IOException {
        if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) {
            // URL points to JSON of a single project, just return it
            return Http.url(albumURL.getLocation()).getJSON();
        }

        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // URL points to JSON of a list of projects, load it to parse individual
            // projects
            JSONObject albumContent = Http.url(albumURL.getLocation()).getJSON();

            if (albumContent.getInt("total_count") > 0) {
                // Get JSON of the first project and return it
                JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(0);
                ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink")));
                return Http.url(projectURL.getLocation()).getJSON();
            }
        }

        throw new IOException("URL specified points to an user with empty portfolio");
    }

    @Override
    protected JSONObject getNextPage(JSONObject doc) throws IOException {
        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // Initialize the index if it hasn't been initialized already
            if (projectIndex == null) {
                projectIndex = 1;
            }

            JSONObject albumContent = Http.url(albumURL.getLocation()).getJSON();

            if (albumContent.getInt("total_count") > projectIndex) {
                // Get JSON of the next project and return it
                JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(projectIndex);
                ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink")));
                projectIndex++;
                return Http.url(projectURL.getLocation()).getJSON();
            }

            throw new IOException("No more projects");
        }

        throw new IOException("Downloading a single project");
    }

    @Override
    protected List<String> getURLsFromJSON(JSONObject json) {
        List<String> assetURLs = new ArrayList<>();
        JSONObject currentObject;

        // Update project name variable from JSON data. Used by downloadURL() to create
        // subfolders when input URL is URL_TYPE.USER_PORTFOLIO
        projectName = json.getString("title");

        for (int i = 0; i < json.getJSONArray("assets").length(); i++) {
            currentObject = json.getJSONArray("assets").getJSONObject(i);

            if (!currentObject.getString("image_url").isEmpty()) {
                // TODO: Find a way to rip external content.
                // ArtStation hosts only image content, everything else (videos, 3D Models, etc)
                // is hosted in other websites and displayed through embedded HTML5 players
                assetURLs.add(currentObject.getString("image_url"));
            }
        }

        return assetURLs;
    }

    @Override
    protected void downloadURL(URL url, int index) {
        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // Replace not allowed characters with underlines
            String folderName = projectName.replaceAll("[\\\\/:*?\"<>|]", "_");

            // Folder name also can't end with dots or spaces, strip them
            folderName = folderName.replaceAll("\\s+$", "");
            folderName = folderName.replaceAll("\\.+$", "");

            // Downloading multiple projects, separate each one in subfolders
            addURLToDownload(url, "", folderName);
        } else {
            addURLToDownload(url);
        }
    }

    @Override
    public String normalizeUrl(String url) {
        // Strip URL parameters
        return url.replaceAll("\\?\\w+$", "");
    }

    private static class ParsedURL {
        URL_TYPE urlType;
        String jsonURL, urlID;

        /**
         * Construct a new ParsedURL object.
         *
         * @param urlType URL_TYPE enum containing the URL type
         * @param jsonURL String containing the JSON URL location
         * @param urlID String containing the ID of this URL
         *
         */
        ParsedURL(URL_TYPE urlType, String jsonURL, String urlID) {
            this.urlType = urlType;
            this.jsonURL = jsonURL;
            this.urlID = urlID;
        }

        /**
         * Get URL Type of this ParsedURL object.
         *
         * @return URL_TYPE enum containing this object type
         *
         */
        URL_TYPE getType() {
            return this.urlType;
        }

        /**
         * Get JSON location of this ParsedURL object.
         *
         * @return String containing the JSON URL
         *
         */
        String getLocation() {
            return this.jsonURL;
        }

        /**
         * Get ID of this ParsedURL object.
         *
         * @return For URL_TYPE.SINGLE_PROJECT, returns the project hash. For
         *         URL_TYPE.USER_PORTFOLIO, returns the account name
         */
        String getID() {
            return this.urlID;
        }
    }

    /**
     * Parses an ArtStation URL.
     *
     * @param url URL to an ArtStation user profile
     *            (https://www.artstation.com/username) or single project
     *            (https://www.artstation.com/artwork/projectid)
     * @return ParsedURL object containing URL type, JSON location and ID (stores
     *         account name or project hash, depending of the URL type identified)
     *
     */
    private ParsedURL parseURL(URL url) {
        String htmlSource;
        ParsedURL parsedURL;

        // Load HTML Source of the specified URL
        try {
            htmlSource = Http.url(url).get().html();
        } catch (IOException e) {
            htmlSource = "";
        }

        // Check if HTML Source of the specified URL references a project
        Pattern p = Pattern.compile("'/projects/(\\w+)\\.json'");
        Matcher m = p.matcher(htmlSource);
        if (m.find()) {
            parsedURL = new ParsedURL(URL_TYPE.SINGLE_PROJECT,
                    "https://www.artstation.com/projects/" + m.group(1) + ".json", m.group(1));
            return parsedURL;
        }

        // Check if HTML Source of the specified URL references a user profile
        p = Pattern.compile("'/users/([\\w-]+)/quick\\.json'");
        m = p.matcher(htmlSource);
        if (m.find()) {
            parsedURL = new ParsedURL(URL_TYPE.USER_PORTFOLIO,
                    "https://www.artstation.com/users/" + m.group(1) + "/projects.json", m.group(1));
            return parsedURL;
        }

        // HTML Source of the specified URL doesn't reference a user profile or project
        parsedURL = new ParsedURL(URL_TYPE.UNKNOWN, null, null);
        return parsedURL;
    }

}
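
parseURL() classifies an input URL by scanning the fetched page source for one of two embedded JSON links: '/projects/<hash>.json' marks a single project, '/users/<name>/quick.json' marks a portfolio. A standalone demonstration of those two regexes; the HTML fragments are fabricated for illustration, not real ArtStation markup:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ParseURLDemo {
        public static void main(String[] args) {
            // Fabricated page fragments of the kind parseURL() scans (illustrative)
            String projectPage = "<link href='/projects/abc123.json' rel=\"alternate\">";
            String profilePage = "<a href='/users/some-artist/quick.json'>profile</a>";

            Matcher project = Pattern.compile("'/projects/(\\w+)\\.json'").matcher(projectPage);
            if (project.find()) {
                System.out.println("SINGLE_PROJECT -> https://www.artstation.com/projects/" + project.group(1) + ".json");
            }

            Matcher profile = Pattern.compile("'/users/([\\w-]+)/quick\\.json'").matcher(profilePage);
            if (profile.find()) {
                System.out.println("USER_PORTFOLIO -> https://www.artstation.com/users/" + profile.group(1) + "/projects.json");
            }
        }
    }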
|
@@ -57,10 +57,7 @@ public class BatoRipper extends AbstractHTMLRipper {
    public boolean pageContainsAlbums(URL url) {
        Pattern p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return true;
        }
        return false;
        return m.matches();
    }

    @Override
@@ -79,7 +76,7 @@ public class BatoRipper extends AbstractHTMLRipper {
            return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_");
        } catch (IOException e) {
            // Fall back to default album naming convention
            logger.info("Unable to find title at " + url);
            LOGGER.info("Unable to find title at " + url);
        }
        return super.getAlbumTitle(url);
    }
@@ -94,10 +91,7 @@ public class BatoRipper extends AbstractHTMLRipper {

        p = Pattern.compile("https?://bato.to/chapter/([\\d]+)/?");
        m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return true;
        }
        return false;
        return m.matches();
    }

    @Override
@@ -119,7 +113,7 @@ public class BatoRipper extends AbstractHTMLRipper {
        s = s.replaceAll("var prevCha = null;", "");
        s = s.replaceAll("var nextCha = \\.*;", "");
        String json = s.replaceAll("var images = ", "").replaceAll(";", "");
        logger.info(s);
        LOGGER.info(s);
        JSONObject images = new JSONObject(json);
        for (int i = 1; i < images.length() +1; i++) {
            result.add(images.getString(Integer.toString(i)));

@@ -56,7 +56,7 @@ public class BcfakesRipper extends AbstractHTMLRipper {
    public Document getNextPage(Document doc) throws IOException {
        // Find next page
        Elements hrefs = doc.select("a.next");
        if (hrefs.size() == 0) {
        if (hrefs.isEmpty()) {
            throw new IOException("No more pages");
        }
        String nextUrl = "http://www.bcfakes.com" + hrefs.first().attr("href");

@@ -19,7 +19,7 @@ import com.rarchives.ripme.utils.RipUtils;

public class ChanRipper extends AbstractHTMLRipper {
    private static List<ChanSite> explicit_domains = Arrays.asList(
            new ChanSite(Arrays.asList("boards.4chan.org"), Arrays.asList("4cdn.org", "is.4chan.org", "is2.4chan.org")),
            new ChanSite(Arrays.asList("boards.4chan.org"), Arrays.asList("4cdn.org", "is.4chan.org", "is2.4chan.org", "is3.4chan.org")),
            new ChanSite(Arrays.asList("4archive.org"), Arrays.asList("imgur.com")),
            new ChanSite(Arrays.asList("archive.4plebs.org"), Arrays.asList("img.4plebs.org"))
    );
@@ -68,11 +68,11 @@ public class ChanRipper extends AbstractHTMLRipper {
                String subject = doc.select(".post.op > .postinfo > .subject").first().text();
                return getHost() + "_" + getGID(url) + "_" + subject;
            } catch (NullPointerException e) {
                logger.warn("Failed to get thread title from " + url);
                LOGGER.warn("Failed to get thread title from " + url);
            }
        } catch (Exception e) {
            // Fall back to default album naming convention
            logger.warn("Failed to get album title from " + url, e);
            LOGGER.warn("Failed to get album title from " + url, e);
        }
        // Fall back on the GID
        return getHost() + "_" + getGID(url);
@@ -85,8 +85,19 @@ public class ChanRipper extends AbstractHTMLRipper {
                return true;
            }
        }
        return url.toExternalForm().contains("/res/") // Most chans
                || url.toExternalForm().contains("/thread/"); // 4chan, archive.moe
        if (url.toExternalForm().contains("desuchan.net") && url.toExternalForm().contains("/res/")) {
            return true;
        }
        if (url.toExternalForm().contains("boards.420chan.org") && url.toExternalForm().contains("/res/")) {
            return true;
        }
        if (url.toExternalForm().contains("7chan.org") && url.toExternalForm().contains("/res/")) {
            return true;
        }
        if (url.toExternalForm().contains("xchan.pw") && url.toExternalForm().contains("/board/")) {
            return true;
        }
        return false;
    }

    /**
@@ -144,7 +155,7 @@ public class ChanRipper extends AbstractHTMLRipper {
    private boolean isURLBlacklisted(String url) {
        for (String blacklist_item : url_piece_blacklist) {
            if (url.contains(blacklist_item)) {
                logger.debug("Skipping link that contains '"+blacklist_item+"': " + url);
                LOGGER.debug("Skipping link that contains '"+blacklist_item+"': " + url);
                return true;
            }
        }
@@ -185,7 +196,7 @@ public class ChanRipper extends AbstractHTMLRipper {
            }
            // Don't download the same URL twice
            if (imageURLs.contains(href)) {
                logger.debug("Already attempted: " + href);
                LOGGER.debug("Already attempted: " + href);
                continue;
            }
            imageURLs.add(href);

@@ -63,7 +63,7 @@ public class CheveretoRipper extends AbstractHTMLRipper {
            return getHost() + "_" + title.trim();
        } catch (IOException e) {
            // Fall back to default album naming convention
            logger.info("Unable to find title at " + url);
            LOGGER.info("Unable to find title at " + url);
        }
        return super.getAlbumTitle(url);
    }
@@ -3,6 +3,7 @@ package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Base64;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.RipUtils;
import com.rarchives.ripme.utils.Utils;
import java.io.IOException;
import java.net.HttpURLConnection;
@@ -37,6 +38,10 @@ public class DeviantartRipper extends AbstractHTMLRipper {
        super(url);
    }

    String loginCookies = "auth=__0f9158aaec09f417b235%3B%221ff79836392a515d154216d919eae573%22;" +
            "auth_secure=__41d14dd0da101f411bb0%3B%2281cf2cf9477776162a1172543aae85ce%22;" +
            "userinfo=__bf84ac233bfa8ae642e8%3B%7B%22username%22%3A%22grabpy%22%2C%22uniqueid%22%3A%22a0a876aa37dbd4b30e1c80406ee9c280%22%2C%22vd%22%3A%22BbHUXZ%2CBbHUXZ%2CA%2CU%2CA%2C%2CB%2CA%2CB%2CBbHUXZ%2CBbHUdj%2CL%2CL%2CA%2CBbHUdj%2C13%2CA%2CB%2CA%2C%2CA%2CA%2CB%2CA%2CA%2C%2CA%22%2C%22attr%22%3A56%7D";

    @Override
    public String getHost() {
        return "deviantart";
@@ -117,23 +122,16 @@ public class DeviantartRipper extends AbstractHTMLRipper {
    @Override
    public Document getFirstPage() throws IOException {

        //Test to see if there is a login:
        String username = Utils.getConfigString("deviantart.username", new String(Base64.decode("Z3JhYnB5")));
        String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
        // Base64 da login
        // username: Z3JhYnB5
        // password: ZmFrZXJz

        if (username == null || password == null) {
            logger.debug("No DeviantArt login provided.");
            cookies.put("agegate_state","1"); // Bypasses the age gate
        } else {
            // Attempt Login
            try {
                cookies = loginToDeviantart();
            } catch (IOException e) {
                logger.warn("Failed to login: ", e);
        cookies = getDACookies();
        if (cookies.isEmpty()) {
            LOGGER.warn("Failed to get login cookies");
            cookies.put("agegate_state","1"); // Bypasses the age gate
        }
        }

        return Http.url(this.url)
                .cookies(cookies)
@@ -161,7 +159,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                script = script.substring(script.indexOf("},\"src\":\"") + 9, script.indexOf("\",\"type\""));
                return script.replace("\\/", "/");
            } catch (StringIndexOutOfBoundsException e) {
                logger.debug("Unable to get json link from " + page.location());
                LOGGER.debug("Unable to get json link from " + page.location());
            }
        }
    }
@@ -204,7 +202,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                }
            }
            if (triedURLs.contains(fullSize)) {
                logger.warn("Already tried to download " + fullSize);
                LOGGER.warn("Already tried to download " + fullSize);
                continue;
            }
            triedURLs.add(fullSize);
@@ -222,7 +220,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
        List<String> textURLs = new ArrayList<>();
        // Iterate over all thumbnails
        for (Element thumb : page.select("div.zones-container span.thumb")) {
            logger.info(thumb.attr("href"));
            LOGGER.info(thumb.attr("href"));
            if (isStopped()) {
                break;
            }
@@ -241,8 +239,8 @@ public class DeviantartRipper extends AbstractHTMLRipper {
            return null;
        }
        Elements nextButtons = page.select("link[rel=\"next\"]");
        if (nextButtons.size() == 0) {
            if (page.select("link[rel=\"prev\"]").size() == 0) {
        if (nextButtons.isEmpty()) {
            if (page.select("link[rel=\"prev\"]").isEmpty()) {
                throw new IOException("No next page found");
            } else {
                throw new IOException("Hit end of pages");
@@ -256,7 +254,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
        if (!sleep(PAGE_SLEEP_TIME)) {
            throw new IOException("Interrupted while waiting to load next page: " + nextPage);
        }
        logger.info("Found next page: " + nextPage);
        LOGGER.info("Found next page: " + nextPage);
        return Http.url(nextPage)
                .cookies(cookies)
                .get();
@@ -351,7 +349,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
            return new String[] {Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)),fullSize};
            // TODO Make this not make a newline if someone just types \n into the description.
        } catch (IOException ioe) {
            logger.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'");
            LOGGER.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'");
            return null;
        }
    }
@@ -376,20 +374,20 @@ public class DeviantartRipper extends AbstractHTMLRipper {
            Elements els = doc.select("img.dev-content-full");
            String fsimage = null;
            // Get the largest resolution image on the page
            if (els.size() > 0) {
            if (!els.isEmpty()) {
                // Large image
                fsimage = els.get(0).attr("src");
                logger.info("Found large-scale: " + fsimage);
                LOGGER.info("Found large-scale: " + fsimage);
                if (fsimage.contains("//orig")) {
                    return fsimage;
                }
            }
            // Try to find the download button
            els = doc.select("a.dev-page-download");
            if (els.size() > 0) {
            if (!els.isEmpty()) {
                // Full-size image
                String downloadLink = els.get(0).attr("href");
                logger.info("Found download button link: " + downloadLink);
                LOGGER.info("Found download button link: " + downloadLink);
                HttpURLConnection con = (HttpURLConnection) new URL(downloadLink).openConnection();
                con.setRequestProperty("Referer",this.url.toString());
                String cookieString = "";
@@ -406,7 +404,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
                con.disconnect();
                if (location.contains("//orig")) {
                    fsimage = location;
                    logger.info("Found image download: " + location);
                    LOGGER.info("Found image download: " + location);
                }
            }
            if (fsimage != null) {
@@ -415,9 +413,9 @@ public class DeviantartRipper extends AbstractHTMLRipper {
            throw new IOException("No download page found");
        } catch (IOException ioe) {
            try {
                logger.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'");
                LOGGER.info("Failed to get full size download image at " + page + " : '" + ioe.getMessage() + "'");
                String lessThanFull = thumbToFull(thumb, false);
                logger.info("Falling back to less-than-full-size image " + lessThanFull);
                LOGGER.info("Falling back to less-than-full-size image " + lessThanFull);
                return lessThanFull;
            } catch (Exception e) {
                return null;
@@ -426,47 +424,10 @@ public class DeviantartRipper extends AbstractHTMLRipper {
    }

    /**
     * Logs into deviant art. Required to rip full-size NSFW content.
     * Returns DA cookies.
     * @return Map of cookies containing session data.
     */
    private Map<String, String> loginToDeviantart() throws IOException {
        // Populate postData fields
        Map<String,String> postData = new HashMap<>();
        String username = Utils.getConfigString("deviantart.username", new String(Base64.decode("Z3JhYnB5")));
        String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
        if (username == null || password == null) {
            throw new IOException("could not find username or password in config");
        }
        Response resp = Http.url("http://www.deviantart.com/")
                .response();
        for (Element input : resp.parse().select("form#form-login input[type=hidden]")) {
            postData.put(input.attr("name"), input.attr("value"));
        }
        postData.put("username", username);
        postData.put("password", password);
        postData.put("remember_me", "1");

        // Send login request
        resp = Http.url("https://www.deviantart.com/users/login")
                .userAgent(USER_AGENT)
                .data(postData)
                .cookies(resp.cookies())
                .method(Method.POST)
                .response();

        // Assert we are logged in
        if (resp.hasHeader("Location") && resp.header("Location").contains("password")) {
            // Wrong password
            throw new IOException("Wrong password");
        }
        if (resp.url().toExternalForm().contains("bad_form")) {
            throw new IOException("Login form was incorrectly submitted");
        }
        if (resp.cookie("auth_secure") == null ||
                resp.cookie("auth") == null) {
            throw new IOException("No auth_secure or auth cookies received");
        }
        // We are logged in, save the cookies
        return resp.cookies();
    private Map<String, String> getDACookies() {
        return RipUtils.getCookiesFromString(Utils.getConfigString("deviantart.cookies", loginCookies));
    }
}
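This hunk swaps the form-based loginToDeviantart() for getDACookies(), which reads a semicolon-separated cookie string from the deviantart.cookies config key and hands it to RipUtils.getCookiesFromString. A minimal hedged sketch of what such a parser might do; the real RipUtils implementation may differ:

```java
import java.util.HashMap;
import java.util.Map;

// Hedged sketch of a cookie-string parser in the spirit of
// RipUtils.getCookiesFromString; the real implementation may differ.
public static Map<String, String> cookiesFromString(String line) {
    Map<String, String> cookies = new HashMap<>();
    for (String pair : line.split(";")) {
        int eq = pair.indexOf('=');   // split on the first '=' only,
        if (eq < 0) {                 // since cookie values may contain '='
            continue;
        }
        cookies.put(pair.substring(0, eq).trim(), pair.substring(eq + 1).trim());
    }
    return cookies;
}

// e.g. cookiesFromString("auth=abc;auth_secure=def") -> {auth=abc, auth_secure=def}
```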
@@ -60,7 +60,7 @@ public class DrawcrowdRipper extends AbstractHTMLRipper {
    @Override
    public Document getNextPage(Document doc) throws IOException {
        Elements loadMore = doc.select("a#load-more");
        if (loadMore.size() == 0) {
        if (loadMore.isEmpty()) {
            throw new IOException("No next page found");
        }
        if (!sleep(1000)) {

@@ -49,7 +49,7 @@ public class DribbbleRipper extends AbstractHTMLRipper {
    public Document getNextPage(Document doc) throws IOException {
        // Find next page
        Elements hrefs = doc.select("a.next_page");
        if (hrefs.size() == 0) {
        if (hrefs.isEmpty()) {
            throw new IOException("No more pages");
        }
        String nextUrl = "https://www.dribbble.com" + hrefs.first().attr("href");

@@ -96,40 +96,40 @@ public class E621Ripper extends AbstractHTMLRipper{

    private String getTerm(URL url) throws MalformedURLException{
        if(gidPattern==null)
            gidPattern=Pattern.compile("^https?://(www\\.)?e621\\.net/post/index/[^/]+/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
            gidPattern=Pattern.compile("^https?://(www\\.)?e621\\.net/post/index/[^/]+/([a-zA-Z0-9$_.+!*'():,%\\-]+)(/.*)?(#.*)?$");
        if(gidPatternPool==null)
            gidPatternPool=Pattern.compile("^https?://(www\\.)?e621\\.net/pool/show/([a-zA-Z0-9$_.+!*'(),%-]+)(\\?.*)?(/.*)?(#.*)?$");
            gidPatternPool=Pattern.compile("^https?://(www\\.)?e621\\.net/pool/show/([a-zA-Z0-9$_.+!*'(),%:\\-]+)(\\?.*)?(/.*)?(#.*)?$");

        Matcher m = gidPattern.matcher(url.toExternalForm());
        if(m.matches())
            return m.group(2);
        if(m.matches()) {
            LOGGER.info(m.group(2));
            return m.group(2);
        }

        m = gidPatternPool.matcher(url.toExternalForm());
        if(m.matches())
            return m.group(2);
        if(m.matches()) {
            return m.group(2);
        }

        throw new MalformedURLException("Expected e621.net URL format: e621.net/post/index/1/searchterm - got "+url+" instead");
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        try {

        String prefix="";
        if(url.getPath().startsWith("/pool/show/"))
            prefix="pool_";
        if (url.getPath().startsWith("/pool/show/")) {
            prefix = "pool_";
        }

            return Utils.filesystemSafe(prefix+new URI(getTerm(url)).getPath());
        } catch (URISyntaxException ex) {
            logger.error(ex);
        }
        return Utils.filesystemSafe(prefix+getTerm(url));

        throw new MalformedURLException("Expected e621.net URL format: e621.net/post/index/1/searchterm - got "+url+" instead");
    }

    @Override
    public URL sanitizeURL(URL url) throws MalformedURLException {
        if(gidPattern2==null)
            gidPattern2=Pattern.compile("^https?://(www\\.)?e621\\.net/post/search\\?tags=([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
            gidPattern2=Pattern.compile("^https?://(www\\.)?e621\\.net/post/search\\?tags=([a-zA-Z0-9$_.+!*'():,%-]+)(/.*)?(#.*)?$");

        Matcher m = gidPattern2.matcher(url.toExternalForm());
        if(m.matches())
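The regex edits above only widen the character class that captures the search term, adding ':' (and, in the first pattern, an escaped '-'). A quick hedged check of the new pattern; the pattern string is copied from the diff, the test URL is invented:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Quick check of the widened pattern; the test URL is made up for illustration.
public class E621TermCheck {
    public static void main(String[] args) {
        Pattern gidPattern = Pattern.compile(
                "^https?://(www\\.)?e621\\.net/post/index/[^/]+/([a-zA-Z0-9$_.+!*'():,%\\-]+)(/.*)?(#.*)?$");
        Matcher m = gidPattern.matcher("https://e621.net/post/index/1/rating:safe");
        if (m.matches()) {
            System.out.println(m.group(2)); // prints "rating:safe" -- the ':' now survives
        }
    }
}
```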
@@ -11,6 +11,7 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ui.RipStatusMessage;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
@@ -69,7 +70,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
            return getHost() + "_" + elems.first().text();
        } catch (Exception e) {
            // Fall back to default album naming convention
            logger.warn("Failed to get album title from " + url, e);
            LOGGER.warn("Failed to get album title from " + url, e);
        }
        return super.getAlbumTitle(url);
    }
@@ -102,7 +103,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
        int retries = 3;
        while (true) {
            sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
            logger.info("Retrieving " + url);
            LOGGER.info("Retrieving " + url);
            doc = Http.url(url)
                    .referrer(this.url)
                    .cookies(cookies)
@@ -111,7 +112,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
            if (retries == 0) {
                throw new IOException("Hit rate limit and maximum number of retries, giving up");
            }
            logger.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining");
            LOGGER.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining");
            retries--;
            try {
                Thread.sleep(IP_BLOCK_SLEEP_TIME);
@@ -125,12 +126,55 @@ public class EHentaiRipper extends AbstractHTMLRipper {
        }
    }

    /**
     * Checks for blacklisted tags on page. If it finds one it returns it, if not it returns null
     *
     * @param doc
     * @return String
     */
    public String checkTags(Document doc, String[] blackListedTags) {
        // If the user hasn't blacklisted any tags we return null;
        if (blackListedTags == null) {
            return null;
        }
        LOGGER.info("Blacklisted tags " + blackListedTags[0]);
        List<String> tagsOnPage = getTags(doc);
        for (String tag : blackListedTags) {
            for (String pageTag : tagsOnPage) {
                // We replace all dashes in the tag with spaces because the tags we get from the site are separated using
                // dashes
                if (tag.trim().toLowerCase().equals(pageTag.toLowerCase())) {
                    return tag;
                }
            }
        }
        return null;
    }

    private List<String> getTags(Document doc) {
        List<String> tags = new ArrayList<>();
        LOGGER.info("Getting tags");
        for (Element tag : doc.select("td > div > a")) {
            LOGGER.info("Found tag " + tag.text());
            tags.add(tag.text());
        }
        return tags;
    }

    @Override
    public Document getFirstPage() throws IOException {
        if (albumDoc == null) {
            albumDoc = getPageWithRetries(this.url);
        }
        this.lastURL = this.url.toExternalForm();
        LOGGER.info("Checking blacklist");
        String blacklistedTag = checkTags(albumDoc, Utils.getConfigStringArray("ehentai.blacklist.tags"));
        if (blacklistedTag != null) {
            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " +
                    "contains the blacklisted tag \"" + blacklistedTag + "\"");
            return null;
        }
        return albumDoc;
    }

@@ -142,14 +186,14 @@ public class EHentaiRipper extends AbstractHTMLRipper {
        }
        // Find next page
        Elements hrefs = doc.select(".ptt a");
        if (hrefs.size() == 0) {
            logger.info("doc: " + doc.html());
        if (hrefs.isEmpty()) {
            LOGGER.info("doc: " + doc.html());
            throw new IOException("No navigation links found");
        }
        // Ensure next page is different from the current page
        String nextURL = hrefs.last().attr("href");
        if (nextURL.equals(this.lastURL)) {
            logger.info("lastURL = nextURL : " + nextURL);
            LOGGER.info("lastURL = nextURL : " + nextURL);
            throw new IOException("Reached last page of results");
        }
        // Sleep before loading next page
@@ -179,7 +223,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                Thread.sleep(IMAGE_SLEEP_TIME);
            }
            catch (InterruptedException e) {
                logger.warn("Interrupted while waiting to load next image", e);
                LOGGER.warn("Interrupted while waiting to load next image", e);
            }
        }

@@ -211,17 +255,17 @@ public class EHentaiRipper extends AbstractHTMLRipper {

            // Find image
            Elements images = doc.select(".sni > a > img");
            if (images.size() == 0) {
            if (images.isEmpty()) {
                // Attempt to find image elsewise (Issue #41)
                images = doc.select("img#img");
                if (images.size() == 0) {
                    logger.warn("Image not found at " + this.url);
                if (images.isEmpty()) {
                    LOGGER.warn("Image not found at " + this.url);
                    return;
                }
            }
            Element image = images.first();
            String imgsrc = image.attr("src");
            logger.info("Found URL " + imgsrc + " via " + images.get(0));
            LOGGER.info("Found URL " + imgsrc + " via " + images.get(0));
            Pattern p = Pattern.compile("^http://.*/ehg/image.php.*&n=([^&]+).*$");
            Matcher m = p.matcher(imgsrc);
            if (m.matches()) {
@@ -242,7 +286,7 @@ public class EHentaiRipper extends AbstractHTMLRipper {
                addURLToDownload(new URL(imgsrc), prefix);
            }
        } catch (IOException e) {
            logger.error("[!] Exception while loading/parsing " + this.url, e);
            LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
        }
    }
}
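A hedged sketch of how the new blacklist hooks together, mirroring getFirstPage() above; the sample tags are invented, the method names come from the diff:

```java
// Hedged usage sketch mirroring getFirstPage() above. The sample tags are
// invented; checkTags(), getTags() and the config key come from the diff.
String[] blacklist = Utils.getConfigStringArray("ehentai.blacklist.tags"); // e.g. {"tag-one", "tag-two"}
String hit = checkTags(albumDoc, blacklist); // returns the offending tag, or null
if (hit != null) {
    // Skip the whole album up front instead of downloading and filtering later
    sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN,
            "Skipping " + url.toExternalForm() + " (blacklisted tag \"" + hit + "\")");
}
```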
@@ -70,7 +70,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
            return getHost() + "_" + title.trim();
        } catch (IOException e) {
            // Fall back to default album naming convention
            logger.info("Unable to find title at " + url);
            LOGGER.info("Unable to find title at " + url);
        }
        return super.getAlbumTitle(url);
    }
@@ -96,19 +96,19 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                if (thumb.attr("href").contains("/comics/album/")) {
                    String subUrl = "https://www.8muses.com" + thumb.attr("href");
                    try {
                        logger.info("Retrieving " + subUrl);
                        LOGGER.info("Retrieving " + subUrl);
                        sendUpdate(STATUS.LOADING_RESOURCE, subUrl);
                        Document subPage = Http.url(subUrl).get();
                        // If the page below this one has images this line will download them
                        List<String> subalbumImages = getURLsFromPage(subPage);
                        logger.info("Found " + subalbumImages.size() + " images in subalbum");
                        LOGGER.info("Found " + subalbumImages.size() + " images in subalbum");
                    } catch (IOException e) {
                        logger.warn("Error while loading subalbum " + subUrl, e);
                        LOGGER.warn("Error while loading subalbum " + subUrl, e);
                    }

                } else if (thumb.attr("href").contains("/comics/picture/")) {
                    logger.info("This page is a album");
                    logger.info("Ripping image");
                    LOGGER.info("This page is a album");
                    LOGGER.info("Ripping image");
                    if (super.isStopped()) break;
                    // Find thumbnail image source
                    String image = null;
@@ -122,7 +122,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                        imageHref = "https://www.8muses.com" + imageHref;
                    }
                    try {
                        logger.info("Retrieving full-size image location from " + imageHref);
                        LOGGER.info("Retrieving full-size image location from " + imageHref);
                        image = getFullSizeImage(imageHref);
                        URL imageUrl = new URL(image);
                        if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
@@ -134,7 +134,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                        x++;

                    } catch (IOException e) {
                        logger.error("Failed to get full-size image from " + imageHref);
                        LOGGER.error("Failed to get full-size image from " + imageHref);
                        continue;
                    }
                }
@@ -152,7 +152,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {

    private String getFullSizeImage(String imageUrl) throws IOException {
        sendUpdate(STATUS.LOADING_RESOURCE, imageUrl);
        logger.info("Getting full sized image from " + imageUrl);
        LOGGER.info("Getting full sized image from " + imageUrl);
        Document doc = new Http(imageUrl).get(); // Retrieve the webpage of the image URL
        String imageName = doc.select("input[id=imageName]").attr("value"); // Select the "input" element from the page
        return "https://www.8muses.com/image/fm/" + imageName;
@@ -166,14 +166,14 @@ public class EightmusesRipper extends AbstractHTMLRipper {
    }

    private String getSubdir(String rawHref) {
        logger.info("Raw title: " + rawHref);
        LOGGER.info("Raw title: " + rawHref);
        String title = rawHref;
        title = title.replaceAll("8muses - Sex and Porn Comics", "");
        title = title.replaceAll("\t\t", "");
        title = title.replaceAll("\n", "");
        title = title.replaceAll("\\| ", "");
        title = title.replace(" ", "-");
        logger.info(title);
        LOGGER.info(title);
        return title;
    }

@@ -103,7 +103,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
            return getHost() + "_" + getGID(url) + "_" + title.trim();
        } catch (IOException e) {
            // Fall back to default album naming convention
            logger.info("Unable to find title at " + url);
            LOGGER.info("Unable to find title at " + url);
        }
        return super.getAlbumTitle(url);
    }
@@ -139,7 +139,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
        try {
            video_page = Http.url("eroshae.com" + link.attr("href")).get();
        } catch (IOException e) {
            logger.warn("Failed to log link in Jsoup");
            LOGGER.warn("Failed to log link in Jsoup");
            video_page = null;
            e.printStackTrace();
        }
@@ -22,6 +22,8 @@ import com.rarchives.ripme.utils.Http;
 */
public class EromeRipper extends AbstractHTMLRipper {

    boolean rippingProfile;

    public EromeRipper (URL url) throws IOException {
        super(url);
@@ -42,6 +44,27 @@ public class EromeRipper extends AbstractHTMLRipper {
        addURLToDownload(url, getPrefix(index));
    }

    @Override
    public boolean hasQueueSupport() {
        return true;
    }

    @Override
    public boolean pageContainsAlbums(URL url) {
        Pattern pa = Pattern.compile("https?://www.erome.com/([a-zA-Z0-9_-]*)/?");
        Matcher ma = pa.matcher(url.toExternalForm());
        return ma.matches();
    }

    @Override
    public List<String> getAlbumsToQueue(Document doc) {
        List<String> urlsToAddToQueue = new ArrayList<>();
        for (Element elem : doc.select("div#albums > div.album > a")) {
            urlsToAddToQueue.add(elem.attr("href"));
        }
        return urlsToAddToQueue;
    }

    @Override
    public String getAlbumTitle(URL url) throws MalformedURLException {
        try {
@@ -52,7 +75,9 @@ public class EromeRipper extends AbstractHTMLRipper {
            return getHost() + "_" + getGID(url) + "_" + title.trim();
        } catch (IOException e) {
            // Fall back to default album naming convention
            logger.info("Unable to find title at " + url);
            LOGGER.info("Unable to find title at " + url);
        } catch (NullPointerException e) {
            return getHost() + "_" + getGID(url);
        }
        return super.getAlbumTitle(url);
    }
@@ -66,21 +91,7 @@ public class EromeRipper extends AbstractHTMLRipper {
    @Override
    public List<String> getURLsFromPage(Document doc) {
        List<String> URLs = new ArrayList<>();
        //Pictures
        Elements imgs = doc.select("div.img > img.img-front");
        for (Element img : imgs) {
            String imageURL = img.attr("src");
            imageURL = "https:" + imageURL;
            URLs.add(imageURL);
        }
        //Videos
        Elements vids = doc.select("div.video > video > source");
        for (Element vid : vids) {
            String videoURL = vid.attr("src");
            URLs.add("https:" + videoURL);
        }

        return URLs;
        return getMediaFromPage(doc);
    }

    @Override
@@ -100,7 +111,7 @@ public class EromeRipper extends AbstractHTMLRipper {
            return m.group(1);
        }

        p = Pattern.compile("^https?://erome.com/a/([a-zA-Z0-9]*)/?$");
        p = Pattern.compile("^https?://www.erome.com/([a-zA-Z0-9_-]+)/?$");
        m = p.matcher(url.toExternalForm());

        if (m.matches()) {
@@ -110,34 +121,15 @@ public class EromeRipper extends AbstractHTMLRipper {
        throw new MalformedURLException("erome album not found in " + url + ", expected https://www.erome.com/album");
    }

    public static List<URL> getURLs(URL url) throws IOException{

        Response resp = Http.url(url)
                .ignoreContentType()
                .response();

        Document doc = resp.parse();

        List<URL> URLs = new ArrayList<>();
        //Pictures
        Elements imgs = doc.getElementsByTag("img");
        for (Element img : imgs) {
            if (img.hasClass("album-image")) {
                String imageURL = img.attr("src");
                imageURL = "https:" + imageURL;
                URLs.add(new URL(imageURL));
            }
    private List<String> getMediaFromPage(Document doc) {
        List<String> results = new ArrayList<>();
        for (Element el : doc.select("img.img-front")) {
            results.add("https:" + el.attr("src"));
        }
        //Videos
        Elements vids = doc.getElementsByTag("video");
        for (Element vid : vids) {
            if (vid.hasClass("album-video")) {
                Elements source = vid.getElementsByTag("source");
                String videoURL = source.first().attr("src");
                URLs.add(new URL(videoURL));
            }
        for (Element el : doc.select("source[label=HD]")) {
            results.add("https:" + el.attr("src"));
        }

        return URLs;
        return results;
    }

}
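For context, a hedged sketch of how the new queue support might be driven; only hasQueueSupport(), pageContainsAlbums() and getAlbumsToQueue() come from the diff, while the dispatch loop, the queue, and the accessor visibility are assumptions about the surrounding RipMe code:

```java
// Hedged driver sketch -- the real dispatch lives in RipMe's queue handling,
// not in this diff. getURL()/getFirstPage() visibility is an assumption.
EromeRipper ripper = new EromeRipper(new URL("https://www.erome.com/someuser"));
if (ripper.hasQueueSupport() && ripper.pageContainsAlbums(ripper.getURL())) {
    for (String albumUrl : ripper.getAlbumsToQueue(ripper.getFirstPage())) {
        // each entry is later ripped as its own album
        System.out.println("queued: " + albumUrl);
    }
}
```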
@@ -153,7 +153,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {

    /** Convert username to UserID. */
    private String getUserID(String username) throws IOException {
        logger.info("Fetching user ID for " + username);
        LOGGER.info("Fetching user ID for " + username);
        JSONObject json = new Http("https://api.500px.com/v1/" +
                "users/show" +
                "?username=" + username +
@@ -165,7 +165,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
    @Override
    public JSONObject getFirstPage() throws IOException {
        URL apiURL = new URL(baseURL + "&consumer_key=" + CONSUMER_KEY);
        logger.debug("apiURL: " + apiURL);
        LOGGER.debug("apiURL: " + apiURL);
        JSONObject json = Http.url(apiURL).getJSON();

        if (baseURL.contains("/galleries?")) {
@@ -185,7 +185,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                    + "?rpp=100"
                    + "&image_size=5"
                    + "&consumer_key=" + CONSUMER_KEY;
                logger.info("Loading " + blogURL);
                LOGGER.info("Loading " + blogURL);
                sendUpdate(STATUS.LOADING_RESOURCE, "Gallery ID " + galleryID + " for userID " + userID);
                JSONObject thisJSON = Http.url(blogURL).getJSON();
                JSONArray thisPhotos = thisJSON.getJSONArray("photos");
@@ -216,7 +216,7 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                    + "&rpp=100"
                    + "&image_size=5"
                    + "&consumer_key=" + CONSUMER_KEY;
                logger.info("Loading " + blogURL);
                LOGGER.info("Loading " + blogURL);
                sendUpdate(STATUS.LOADING_RESOURCE, "Story ID " + blogid + " for user " + username);
                JSONObject thisJSON = Http.url(blogURL).getJSON();
                JSONArray thisPhotos = thisJSON.getJSONArray("photos");
@@ -268,20 +268,20 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
            Document doc;
            Elements images = new Elements();
            try {
                logger.debug("Loading " + rawUrl);
                LOGGER.debug("Loading " + rawUrl);
                super.retrievingSource(rawUrl);
                doc = Http.url(rawUrl).get();
                images = doc.select("div#preload img");
            }
            catch (IOException e) {
                logger.error("Error fetching full-size image from " + rawUrl, e);
                LOGGER.error("Error fetching full-size image from " + rawUrl, e);
            }
            if (images.size() > 0) {
            if (!images.isEmpty()) {
                imageURL = images.first().attr("src");
                logger.debug("Found full-size non-watermarked image: " + imageURL);
                LOGGER.debug("Found full-size non-watermarked image: " + imageURL);
            }
            else {
                logger.debug("Falling back to image_url from API response");
                LOGGER.debug("Falling back to image_url from API response");
                imageURL = photo.getString("image_url");
                imageURL = imageURL.replaceAll("/4\\.", "/5.");
                // See if there's larger images
@@ -289,14 +289,14 @@ public class FivehundredpxRipper extends AbstractJSONRipper {
                    String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + ".");
                    sleep(10);
                    if (urlExists(fsURL)) {
                        logger.info("Found larger image at " + fsURL);
                        LOGGER.info("Found larger image at " + fsURL);
                        imageURL = fsURL;
                        break;
                    }
                }
            }
            if (imageURL == null) {
                logger.error("Failed to find image for photo " + photo.toString());
                LOGGER.error("Failed to find image for photo " + photo.toString());
            }
            else {
                imageURLs.add(imageURL);

@@ -250,8 +250,8 @@ public class FlickrRipper extends AbstractHTMLRipper {
            try {
                Document doc = getLargestImagePageDocument(this.url);
                Elements fullsizeImages = doc.select("div#allsizes-photo img");
                if (fullsizeImages.size() == 0) {
                    logger.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
                if (fullsizeImages.isEmpty()) {
                    LOGGER.error("Could not find flickr image at " + doc.location() + " - missing 'div#allsizes-photo img'");
                }
                else {
                    String prefix = "";
@@ -263,7 +263,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
                    }
                }
            } catch (IOException e) {
                logger.error("[!] Exception while loading/parsing " + this.url, e);
                LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
            }
        }

@@ -274,7 +274,7 @@ public class FlickrRipper extends AbstractHTMLRipper {
        String largestImagePage = this.url.toExternalForm();
        for (Element olSize : doc.select("ol.sizes-list > li > ol > li")) {
            Elements ola = olSize.select("a");
            if (ola.size() == 0) {
            if (ola.isEmpty()) {
                largestImagePage = this.url.toExternalForm();
            }
            else {
@@ -12,8 +12,10 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ui.RipStatusMessage;
import com.rarchives.ripme.utils.Utils;
import org.jsoup.Connection.Response;
import org.jsoup.HttpStatusException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
@@ -24,13 +26,27 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.DownloadThreadPool;
import com.rarchives.ripme.utils.Http;

import static com.rarchives.ripme.utils.RipUtils.getCookiesFromString;

public class FuraffinityRipper extends AbstractHTMLRipper {

    private static final String urlBase = "https://www.furaffinity.net";
    private static Map<String,String> cookies = new HashMap<>();
    static {
        cookies.put("b", "bd5ccac8-51dc-4265-8ae1-7eac685ad667");
        cookies.put("a", "7c41b782-d01d-4b0e-b45b-62a4f0b2a369");
    private Map<String,String> cookies = new HashMap<>();

    private void setCookies() {
        if (Utils.getConfigBoolean("furaffinity.login", true)) {
            LOGGER.info("Logging in using cookies");
            String faCookies = Utils.getConfigString("furaffinity.cookies", "a=897bc45b-1f87-49f1-8a85-9412bc103e7a;b=c8807f36-7a85-4caf-80ca-01c2a2368267");
            warnAboutSharedAccount(faCookies);
            cookies = getCookiesFromString(faCookies);
        }
    }

    private void warnAboutSharedAccount(String loginCookies) {
        if (loginCookies.equals("a=897bc45b-1f87-49f1-8a85-9412bc103e7a;b=c8807f36-7a85-4caf-80ca-01c2a2368267")) {
            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED,
                    "WARNING: Using the shared furaffinity account exposes both your IP and how many items you downloaded to the other users of the share account");
        }
    }

    // Thread pool for finding direct image links from "image" pages (html)
@@ -61,6 +77,8 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
    }
    @Override
    public Document getFirstPage() throws IOException {
        setCookies();
        LOGGER.info(Http.url(url).cookies(cookies).get().html());
        return Http.url(url).cookies(cookies).get();
    }

@@ -68,7 +86,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
    public Document getNextPage(Document doc) throws IOException {
        // Find next page
        Elements nextPageUrl = doc.select("a.right");
        if (nextPageUrl.size() == 0) {
        if (nextPageUrl.isEmpty()) {
            throw new IOException("No more pages");
        }
        String nextUrl = urlBase + nextPageUrl.first().attr("href");
@@ -80,12 +98,21 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
    }

    private String getImageFromPost(String url) {
        sleep(1000);
        Document d = null;
        try {
            logger.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content"));
            return Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content");
            d = Http.url(url).cookies(cookies).get();
            Elements links = d.getElementsByTag("a");
            for (Element link : links) {
                if (link.text().equals("Download")) {
                    LOGGER.info("Found image " + link.attr("href"));
                    return "https:" + link.attr("href");
                }
            }
        } catch (IOException e) {
            return "";
            return null;
        }
        return null;
    }

    @Override
@@ -93,7 +120,12 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
        List<String> urls = new ArrayList<>();
        Elements urlElements = page.select("figure.t-image > b > u > a");
        for (Element e : urlElements) {
            urls.add(getImageFromPost(urlBase + e.select("a").first().attr("href")));
            String urlToAdd = getImageFromPost(urlBase + e.select("a").first().attr("href"));
            if (url != null) {
                if (urlToAdd.startsWith("http")) {
                    urls.add(urlToAdd);
                }
            }
        }
        return urls;
    }
@@ -103,7 +135,7 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
        Elements urlElements = page.select("figure.t-image > b > u > a");
        for (Element e : urlElements) {
            urls.add(urlBase + e.select("a").first().attr("href"));
            logger.debug("Desc2 " + urlBase + e.select("a").first().attr("href"));
            LOGGER.debug("Desc2 " + urlBase + e.select("a").first().attr("href"));
        }
        return urls;
    }
@@ -121,22 +153,22 @@ public class FuraffinityRipper extends AbstractHTMLRipper {

            // Try to find the description
            Elements els = resp.parse().select("td[class=alt1][width=\"70%\"]");
            if (els.size() == 0) {
                logger.debug("No description at " + page);
            if (els.isEmpty()) {
                LOGGER.debug("No description at " + page);
                throw new IOException("No description found");
            }
            logger.debug("Description found!");
            LOGGER.debug("Description found!");
            Document documentz = resp.parse();
            Element ele = documentz.select("td[class=alt1][width=\"70%\"]").get(0); // This is where the description is.
            // Would break completely if FurAffinity changed site layout.
            documentz.outputSettings(new Document.OutputSettings().prettyPrint(false));
            ele.select("br").append("\\n");
            ele.select("p").prepend("\\n\\n");
            logger.debug("Returning description at " + page);
            LOGGER.debug("Returning description at " + page);
            String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false));
            return documentz.select("meta[property=og:title]").attr("content") + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name.
        } catch (IOException ioe) {
            logger.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
            LOGGER.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
            return null;
        }
    }
@@ -171,12 +203,12 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
            out.write(text.getBytes());
            out.close();
        } catch (IOException e) {
            logger.error("[!] Error creating save file path for description '" + url + "':", e);
            LOGGER.error("[!] Error creating save file path for description '" + url + "':", e);
            return false;
        }
        logger.debug("Downloading " + url + "'s description to " + saveFileAs);
        LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs);
        if (!saveFileAs.getParentFile().exists()) {
            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
            LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
            saveFileAs.getParentFile().mkdirs();
        }
        return true;
@@ -200,16 +232,5 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
                + " instead");
    }

    private class FuraffinityDocumentThread extends Thread {
        private URL url;

        FuraffinityDocumentThread(URL url) {
            super();
            this.url = url;
        }

    }

}
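Since the shared-account warning above triggers only on the default cookie string, pointing furaffinity.cookies at your own session avoids it. A hedged sketch; the setConfig* setters are assumed to exist alongside the getConfig* getters used in the diff, and the cookie placeholders are yours to fill in:

```java
// Hedged sketch: supplying your own FurAffinity session cookies so the
// shared-account warning never fires. Key names come from the diff;
// the setConfig* setters are an assumption about the Utils API.
Utils.setConfigBoolean("furaffinity.login", true);
Utils.setConfigString("furaffinity.cookies", "a=<your-a-cookie>;b=<your-b-cookie>");
// setCookies() then parses this string via RipUtils.getCookiesFromString and
// sends it with every request made in getFirstPage()/getNextPage().
```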
@@ -67,7 +67,7 @@ public class FuskatorRipper extends AbstractHTMLRipper {
        try {
            baseUrl = URLDecoder.decode(baseUrl, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            logger.warn("Error while decoding " + baseUrl, e);
            LOGGER.warn("Error while decoding " + baseUrl, e);
        }
        if (baseUrl.startsWith("//")) {
            baseUrl = "http:" + baseUrl;

@@ -40,7 +40,7 @@ public class GirlsOfDesireRipper extends AbstractHTMLRipper {
            return getHost() + "_" + elems.first().text();
        } catch (Exception e) {
            // Fall back to default album naming convention
            logger.warn("Failed to get album title from " + url, e);
            LOGGER.warn("Failed to get album title from " + url, e);
        }
        return super.getAlbumTitle(url);
    }

@@ -56,7 +56,7 @@ public class HbrowseRipper extends AbstractHTMLRipper {
            return getHost() + "_" + title + "_" + getGID(url);
        } catch (Exception e) {
            // Fall back to default album naming convention
            logger.warn("Failed to get album title from " + url, e);
            LOGGER.warn("Failed to get album title from " + url, e);
        }
        return super.getAlbumTitle(url);
    }

@@ -38,7 +38,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {

    @Override
    public boolean pageContainsAlbums(URL url) {
        logger.info("Page contains albums");
        LOGGER.info("Page contains albums");
        Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
        Matcher mat = pat.matcher(url.toExternalForm());
        if (mat.matches()) {
@@ -95,7 +95,7 @@ public class Hentai2readRipper extends AbstractHTMLRipper {
            return getHost() + "_" + getGID(url);
        } catch (Exception e) {
            // Fall back to default album naming convention
            logger.warn("Failed to get album title from " + url, e);
            LOGGER.warn("Failed to get album title from " + url, e);
        }
        return super.getAlbumTitle(url);
    }

@@ -98,7 +98,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
            cookies.putAll(resp.cookies());
        }
        else {
            logger.info("unable to find csrf_token and set filter");
            LOGGER.info("unable to find csrf_token and set filter");
        }

        resp = Http.url(url)
@@ -111,7 +111,7 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {

    @Override
    public Document getNextPage(Document doc) throws IOException {
        if (doc.select("li.next.hidden").size() != 0) {
        if (!doc.select("li.next.hidden").isEmpty()) {
            // Last page
            throw new IOException("No more pages");
        }
@@ -139,19 +139,19 @@ public class HentaifoundryRipper extends AbstractHTMLRipper {
            }
            Matcher imgMatcher = imgRegex.matcher(thumb.attr("href"));
            if (!imgMatcher.matches()) {
                logger.info("Couldn't find user & image ID in " + thumb.attr("href"));
                LOGGER.info("Couldn't find user & image ID in " + thumb.attr("href"));
                continue;
            }
            Document imagePage;
            try {

                logger.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
                LOGGER.info("grabbing " + "http://www.hentai-foundry.com" + thumb.attr("href"));
                imagePage = Http.url("http://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
            }

            catch (IOException e) {
                logger.debug(e.getMessage());
                logger.debug("Warning: imagePage is null!");
                LOGGER.debug(e.getMessage());
                LOGGER.debug("Warning: imagePage is null!");
                imagePage = null;
            }
            // This is here for when the image is resized to a thumbnail because ripme doesn't report a screensize

@@ -9,9 +9,7 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.json.JSONArray;
import org.json.JSONObject;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
@@ -57,7 +55,7 @@ public class HitomiRipper extends AbstractHTMLRipper {
    public List<String> getURLsFromPage(Document doc) {
        List<String> result = new ArrayList<>();
        String json = doc.text().replaceAll("var galleryinfo =", "");
        logger.info(json);
        LOGGER.info(json);
        JSONArray json_data = new JSONArray(json);
        for (int i = 0; i < json_data.length(); i++) {
            result.add("https://0a.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));

@@ -14,8 +14,6 @@ import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

import javax.print.Doc;

public class HypnohubRipper extends AbstractHTMLRipper {

    public HypnohubRipper(URL url) throws IOException {
@@ -55,14 +53,14 @@ public class HypnohubRipper extends AbstractHTMLRipper {
    }

    private String ripPost(String url) throws IOException {
        logger.info(url);
        LOGGER.info(url);
        Document doc = Http.url(url).get();
        return "https:" + doc.select("img.image").attr("src");

    }

    private String ripPost(Document doc) {
        logger.info(url);
        LOGGER.info(url);
        return "https:" + doc.select("img.image").attr("src");

    }

@@ -99,16 +99,16 @@ public class ImagebamRipper extends AbstractHTMLRipper {
            // Attempt to use album title as GID
            Elements elems = getFirstPage().select("legend");
            String title = elems.first().text();
            logger.info("Title text: '" + title + "'");
            LOGGER.info("Title text: '" + title + "'");
            Pattern p = Pattern.compile("^(.*)\\s\\d* image.*$");
            Matcher m = p.matcher(title);
            if (m.matches()) {
                return getHost() + "_" + getGID(url) + " (" + m.group(1).trim() + ")";
            }
            logger.info("Doesn't match " + p.pattern());
            LOGGER.info("Doesn't match " + p.pattern());
        } catch (Exception e) {
            // Fall back to default album naming convention
            logger.warn("Failed to get album title from " + url, e);
            LOGGER.warn("Failed to get album title from " + url, e);
        }
        return super.getAlbumTitle(url);
    }
@@ -148,14 +148,14 @@ public class ImagebamRipper extends AbstractHTMLRipper {
                //the direct link to the image seems to always be linked in the <meta> part of the html.
                if (metaTag.attr("property").equals("og:image")) {
                    imgsrc = metaTag.attr("content");
                    logger.info("Found URL " + imgsrc);
                    LOGGER.info("Found URL " + imgsrc);
                    break;//only one (useful) image possible for an "image page".
                }
            }

            //for debug, or something goes wrong.
            if (imgsrc.isEmpty()) {
                logger.warn("Image not found at " + this.url);
                LOGGER.warn("Image not found at " + this.url);
                return;
            }

@@ -167,7 +167,7 @@ public class ImagebamRipper extends AbstractHTMLRipper {

            addURLToDownload(new URL(imgsrc), prefix);
        } catch (IOException e) {
            logger.error("[!] Exception while loading/parsing " + this.url, e);
            LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
        }
    }
}

@@ -43,7 +43,7 @@ public class ImagefapRipper extends AbstractHTMLRipper {
            newURL += "p";
        }
        newURL += "gid=" + gid + "&view=2";
        logger.debug("Changed URL from " + url + " to " + newURL);
        LOGGER.debug("Changed URL from " + url + " to " + newURL);
        return new URL(newURL);
    }

@@ -101,8 +101,8 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
                    .get();
            // Find image
            Elements images = doc.select("a > img");
            if (images.size() == 0) {
                logger.warn("Image not found at " + this.url);
            if (images.isEmpty()) {
                LOGGER.warn("Image not found at " + this.url);
                return;
            }
            Element image = images.first();
@@ -115,7 +115,7 @@ public class ImagevenueRipper extends AbstractHTMLRipper {
            }
            addURLToDownload(new URL(imgsrc), prefix);
        } catch (IOException e) {
            logger.error("[!] Exception while loading/parsing " + this.url, e);
            LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
        }
    }
}
@ -108,24 +108,24 @@ public class ImgurRipper extends AlbumRipper {
|
||||
String title = null;
|
||||
final String defaultTitle1 = "Imgur: The most awesome images on the Internet";
|
||||
final String defaultTitle2 = "Imgur: The magic of the Internet";
|
||||
logger.info("Trying to get album title");
|
||||
LOGGER.info("Trying to get album title");
|
||||
elems = albumDoc.select("meta[property=og:title]");
|
||||
if (elems != null) {
|
||||
title = elems.attr("content");
|
||||
logger.debug("Title is " + title);
|
||||
LOGGER.debug("Title is " + title);
|
||||
}
|
||||
// This is here encase the album is unnamed, to prevent
|
||||
// Imgur: The most awesome images on the Internet from being added onto the album name
|
||||
if (title.contains(defaultTitle1) || title.contains(defaultTitle2)) {
|
||||
logger.debug("Album is untitled or imgur is returning the default title");
|
||||
LOGGER.debug("Album is untitled or imgur is returning the default title");
|
||||
// We set the title to "" here because if it's found in the next few attempts it will be changed
|
||||
// but if it's nto found there will be no reason to set it later
|
||||
title = "";
|
||||
logger.debug("Trying to use title tag to get title");
|
||||
LOGGER.debug("Trying to use title tag to get title");
|
||||
elems = albumDoc.select("title");
|
||||
if (elems != null) {
|
||||
if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) {
|
||||
logger.debug("Was unable to get album title or album was untitled");
|
||||
LOGGER.debug("Was unable to get album title or album was untitled");
|
||||
}
|
||||
else {
|
||||
title = elems.text();
|
||||
@ -159,29 +159,29 @@ public class ImgurRipper extends AlbumRipper {
|
||||
case ALBUM:
|
||||
// Fall-through
|
||||
case USER_ALBUM:
|
||||
logger.info("Album type is USER_ALBUM");
|
||||
LOGGER.info("Album type is USER_ALBUM");
|
||||
// Don't call getAlbumTitle(this.url) with this
|
||||
// as it seems to cause the album to be downloaded to a subdir.
|
||||
ripAlbum(this.url);
|
||||
break;
|
||||
case SERIES_OF_IMAGES:
|
||||
logger.info("Album type is SERIES_OF_IMAGES");
|
||||
LOGGER.info("Album type is SERIES_OF_IMAGES");
|
||||
ripAlbum(this.url);
|
||||
break;
|
||||
case SINGLE_IMAGE:
|
||||
logger.info("Album type is SINGLE_IMAGE");
|
||||
LOGGER.info("Album type is SINGLE_IMAGE");
|
||||
ripSingleImage(this.url);
|
||||
break;
|
||||
case USER:
|
||||
logger.info("Album type is USER");
|
||||
LOGGER.info("Album type is USER");
|
||||
ripUserAccount(url);
|
||||
break;
|
||||
case SUBREDDIT:
|
||||
logger.info("Album type is SUBREDDIT");
|
||||
LOGGER.info("Album type is SUBREDDIT");
|
||||
ripSubreddit(url);
|
||||
break;
|
||||
case USER_IMAGES:
|
||||
logger.info("Album type is USER_IMAGES");
|
||||
LOGGER.info("Album type is USER_IMAGES");
|
||||
ripUserImages(url);
|
||||
break;
|
||||
}
|
||||
@ -241,7 +241,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
String[] imageIds = m.group(1).split(",");
|
||||
for (String imageId : imageIds) {
|
||||
// TODO: Fetch image with ID imageId
|
||||
logger.debug("Fetching image info for ID " + imageId);
|
||||
LOGGER.debug("Fetching image info for ID " + imageId);
|
||||
try {
|
||||
JSONObject json = Http.url("https://api.imgur.com/2/image/" + imageId + ".json").getJSON();
|
||||
if (!json.has("image")) {
|
||||
@ -259,7 +259,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
ImgurImage theImage = new ImgurImage(new URL(original));
|
||||
album.addImage(theImage);
|
||||
} catch (Exception e) {
|
||||
logger.error("Got exception while fetching imgur ID " + imageId, e);
|
||||
LOGGER.error("Got exception while fetching imgur ID " + imageId, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -271,7 +271,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
if (!strUrl.contains(",")) {
|
||||
strUrl += "/all";
|
||||
}
|
||||
logger.info(" Retrieving " + strUrl);
|
||||
LOGGER.info(" Retrieving " + strUrl);
|
||||
Document doc = getDocument(strUrl);
|
||||
// Try to use embedded JSON to retrieve images
|
||||
Matcher m = getEmbeddedJsonMatcher(doc);
|
||||
@ -283,7 +283,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
.getJSONArray("images");
|
||||
return createImgurAlbumFromJsonArray(url, jsonImages);
|
||||
} catch (JSONException e) {
|
||||
logger.debug("Error while parsing JSON at " + url + ", continuing", e);
|
||||
LOGGER.debug("Error while parsing JSON at " + url + ", continuing", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -291,10 +291,10 @@ public class ImgurRipper extends AlbumRipper {
|
||||
// http://i.rarchives.com/search.cgi?cache=http://imgur.com/a/albumID
|
||||
// At the least, get the thumbnails.
|
||||
|
||||
logger.info("[!] Falling back to /noscript method");
|
||||
LOGGER.info("[!] Falling back to /noscript method");
|
||||
|
||||
String newUrl = url.toExternalForm() + "/noscript";
|
||||
logger.info(" Retrieving " + newUrl);
|
||||
LOGGER.info(" Retrieving " + newUrl);
|
||||
doc = Jsoup.connect(newUrl)
|
||||
.userAgent(USER_AGENT)
|
||||
.get();
|
||||
@ -304,14 +304,14 @@ public class ImgurRipper extends AlbumRipper {
|
||||
ImgurAlbum imgurAlbum = new ImgurAlbum(url);
|
||||
for (Element thumb : doc.select("div.image")) {
|
||||
String image;
|
||||
if (thumb.select("a.zoom").size() > 0) {
|
||||
if (!thumb.select("a.zoom").isEmpty()) {
|
||||
// Clickably full-size
|
||||
image = "http:" + thumb.select("a").attr("href");
|
||||
} else if (thumb.select("img").size() > 0) {
|
||||
} else if (!thumb.select("img").isEmpty()) {
|
||||
image = "http:" + thumb.select("img").attr("src");
|
||||
} else {
|
||||
// Unable to find image in this div
|
||||
logger.error("[!] Unable to find image in div: " + thumb.toString());
|
||||
LOGGER.error("[!] Unable to find image in div: " + thumb.toString());
|
||||
continue;
|
||||
}
|
||||
if (image.endsWith(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
|
||||
@ -368,7 +368,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
* @throws IOException
|
||||
*/
|
||||
private void ripUserAccount(URL url) throws IOException {
|
||||
logger.info("Retrieving " + url);
|
||||
LOGGER.info("Retrieving " + url);
|
||||
sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
|
||||
Document doc = Http.url(url).get();
|
||||
for (Element album : doc.select("div.cover a")) {
|
||||
@ -383,7 +383,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
ripAlbum(albumURL, albumID);
|
||||
Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000);
|
||||
} catch (Exception e) {
|
||||
logger.error("Error while ripping album: " + e.getMessage(), e);
|
||||
LOGGER.error("Error while ripping album: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -420,7 +420,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
}
|
||||
Thread.sleep(1000);
|
||||
} catch (Exception e) {
|
||||
logger.error("Error while ripping user images: " + e.getMessage(), e);
|
||||
LOGGER.error("Error while ripping user images: " + e.getMessage(), e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -435,7 +435,7 @@ public class ImgurRipper extends AlbumRipper {
|
||||
pageURL += "/";
|
||||
}
|
||||
pageURL += "page/" + page + "/miss?scrolled";
|
||||
logger.info(" Retrieving " + pageURL);
|
||||
LOGGER.info(" Retrieving " + pageURL);
|
||||
Document doc = Http.url(pageURL).get();
|
||||
Elements imgs = doc.select(".post img");
|
||||
for (Element img : imgs) {
|
||||
@ -449,14 +449,14 @@ public class ImgurRipper extends AlbumRipper {
|
||||
URL imageURL = new URL(image);
|
||||
addURLToDownload(imageURL);
|
||||
}
|
||||
if (imgs.size() == 0) {
|
||||
if (imgs.isEmpty()) {
|
||||
break;
|
||||
}
|
||||
page++;
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException e) {
|
||||
logger.error("Interrupted while waiting to load next album: ", e);
|
||||
LOGGER.error("Interrupted while waiting to load next album: ", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
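
Note: the logger → LOGGER pairs that make up most of this commit are call-site renames; the field declaration itself sits outside the hunks shown here. RipMe logs through log4j, so the renamed field presumably looks something like this sketch (not an actual hunk from this commit):

    import org.apache.log4j.Logger;

    public abstract class AbstractRipper {
        // Hypothetical declaration; constant-style name per Java convention
        // for static final fields, which is what the rename is after.
        protected static final Logger LOGGER = Logger.getLogger(AbstractRipper.class);
    }
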
@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.utils.Http;

import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ui.RipStatusMessage;
@ -67,7 +66,7 @@ public class InstagramRipper extends AbstractJSONRipper {
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
URL san_url = new URL(url.toExternalForm().replaceAll("\\?hl=\\S*", ""));
logger.info("sanitized URL is " + san_url.toExternalForm());
LOGGER.info("sanitized URL is " + san_url.toExternalForm());
return san_url;
}

@ -77,6 +76,10 @@ public class InstagramRipper extends AbstractJSONRipper {
return url.replaceAll("/[A-Z0-9]{8}/", "/");
}

@Override public boolean hasASAPRipping() {
return true;
}

private List<String> getPostsFromSinglePage(JSONObject json) {
List<String> imageURLs = new ArrayList<>();
JSONArray datas;
@ -184,7 +187,7 @@ public class InstagramRipper extends AbstractJSONRipper {
@Override
public JSONObject getFirstPage() throws IOException {
Connection.Response resp = Http.url(url).response();
logger.info(resp.cookies());
LOGGER.info(resp.cookies());
csrftoken = resp.cookie("csrftoken");
Document p = resp.parse();
// Get the query hash so we can download the next page
@ -197,7 +200,7 @@ public class InstagramRipper extends AbstractJSONRipper {
Document doc = Http.url("https://www.instagram.com/p/" + videoID).get();
return doc.select("meta[property=og:video]").attr("content");
} catch (IOException e) {
logger.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID);
LOGGER.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID);
}
return "";
}
@ -232,9 +235,23 @@ public class InstagramRipper extends AbstractJSONRipper {
return imageURL;
}

public String getAfter(JSONObject json) {
try {
return json.getJSONObject("entry_data").getJSONArray("ProfilePage").getJSONObject(0)
.getJSONObject("graphql").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONObject("page_info").getString("end_cursor");
} catch (JSONException e) {
return json.getJSONObject("data").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONObject("page_info").getString("end_cursor");
}
}

@Override
public List<String> getURLsFromJSON(JSONObject json) {
List<String> imageURLs = new ArrayList<>();
if (!url.toExternalForm().contains("/p/")) {
nextPageID = getAfter(json);
}

// get the rhx_gis value so we can get the next page later on
if (rhx_gis == null) {
@ -247,7 +264,8 @@ public class InstagramRipper extends AbstractJSONRipper {
try {
JSONArray profilePage = json.getJSONObject("entry_data").getJSONArray("ProfilePage");
userID = profilePage.getJSONObject(0).getString("logging_page_id").replaceAll("profilePage_", "");
datas = profilePage.getJSONObject(0).getJSONObject("graphql").getJSONObject("user")
datas = json.getJSONObject("entry_data").getJSONArray("ProfilePage").getJSONObject(0)
.getJSONObject("graphql").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges");
} catch (JSONException e) {
datas = json.getJSONObject("data").getJSONObject("user")
@ -279,15 +297,15 @@ public class InstagramRipper extends AbstractJSONRipper {
addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode"));
}
} catch (MalformedURLException e) {
logger.error("Unable to download slide show, URL was malformed");
LOGGER.error("Unable to download slide show, URL was malformed");
} catch (IOException e) {
logger.error("Unable to download slide show");
LOGGER.error("Unable to download slide show");
}
}
}
try {
if (!data.getBoolean("is_video")) {
if (imageURLs.size() == 0) {
if (imageURLs.isEmpty()) {
// We add this one item to the array because otherwise
// the ripper will error out because we returned an empty array
imageURLs.add(getOriginalUrl(data.getString("display_url")));
@ -301,18 +319,17 @@ public class InstagramRipper extends AbstractJSONRipper {
}
}
} catch (MalformedURLException e) {
LOGGER.info("Got MalformedURLException");
return imageURLs;
}

nextPageID = data.getString("id");

if (isThisATest()) {
break;
}
}

} else { // We're ripping from a single page
logger.info("Ripping from single page");
LOGGER.info("Ripping from single page");
imageURLs = getPostsFromSinglePage(json);
}

@ -321,7 +338,7 @@ public class InstagramRipper extends AbstractJSONRipper {

private String getIGGis(String variables) {
String stringToMD5 = rhx_gis + ":" + variables;
logger.debug("String to md5 is \"" + stringToMD5 + "\"");
LOGGER.debug("String to md5 is \"" + stringToMD5 + "\"");
try {
byte[] bytesOfMessage = stringToMD5.getBytes("UTF-8");
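
The getIGGis() hunk above is truncated; the value it builds is the MD5 hex digest of rhx_gis + ":" + variables, which the ripper sends with each GraphQL page request. A self-contained sketch of that signing step using only java.security.MessageDigest (class name and sample inputs are made up):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class IgGisSketch {
        // MD5("<rhx_gis>:<variables>") rendered as lowercase hex.
        static String igGis(String rhxGis, String variables) throws NoSuchAlgorithmException {
            byte[] digest = MessageDigest.getInstance("MD5")
                    .digest((rhxGis + ":" + variables).getBytes(StandardCharsets.UTF_8));
            StringBuilder hex = new StringBuilder();
            for (byte b : digest) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();
        }

        public static void main(String[] args) throws NoSuchAlgorithmException {
            // Hypothetical values; the real ones come from the page's shared-data JSON.
            System.out.println(igGis("a9f69d8c", "{\"id\":\"1\",\"first\":12}"));
        }
    }
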
@ -355,7 +372,7 @@ public class InstagramRipper extends AbstractJSONRipper {
toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
"&variables=" + vars, ig_gis);
// Sleep for a while to avoid a ban
logger.info(toreturn);
LOGGER.info(toreturn);
if (!pageHasImages(toreturn)) {
throw new IOException("No more pages");
}
@ -369,10 +386,11 @@ public class InstagramRipper extends AbstractJSONRipper {
try {
// Sleep for a while to avoid a ban
sleep(2500);
String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
String vars = "{\"id\":\"" + userID + "\",\"first\":12,\"after\":\"" + nextPageID + "\"}";
String ig_gis = getIGGis(vars);
logger.info(ig_gis);
LOGGER.info(ig_gis);

LOGGER.info("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars);
toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis);
if (!pageHasImages(toreturn)) {
throw new IOException("No more pages");
@ -392,6 +410,7 @@ public class InstagramRipper extends AbstractJSONRipper {
}

private boolean pageHasImages(JSONObject json) {
LOGGER.info(json);
int numberOfImages = json.getJSONObject("data").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges").length();
if (numberOfImages == 0) {
@ -419,11 +438,11 @@ public class InstagramRipper extends AbstractJSONRipper {
return new JSONObject(sb.toString());

} catch (MalformedURLException e) {
logger.info("Unable to get query_hash, " + url + " is a malformed URL");
LOGGER.info("Unable to get query_hash, " + url + " is a malformed URL");
return null;
} catch (IOException e) {
logger.info("Unable to get query_hash");
logger.info(e.getMessage());
LOGGER.info("Unable to get query_hash");
LOGGER.info(e.getMessage());
return null;
}
}
@ -444,24 +463,19 @@ public class InstagramRipper extends AbstractJSONRipper {
in.close();

} catch (MalformedURLException e) {
logger.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
LOGGER.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
return null;
} catch (IOException e) {
logger.info("Unable to get query_hash");
logger.info(e.getMessage());
LOGGER.info("Unable to get query_hash");
LOGGER.info(e.getMessage());
return null;
}
if (!rippingTag) {
Pattern jsP = Pattern.compile("o},queryId:.([a-zA-Z0-9]+).");
Pattern jsP = Pattern.compile("byUserId\\.get\\(t\\)\\)\\|\\|void 0===r\\?void 0:r\\.pagination},queryId:.([a-zA-Z0-9]+)");
Matcher m = jsP.matcher(sb.toString());
if (m.find()) {
return m.group(1);
}
jsP = Pattern.compile("n.pagination:n},queryId:.([a-zA-Z0-9]+).");
m = jsP.matcher(sb.toString());
if (m.find()) {
return m.group(1);
}

} else {
Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
@ -470,7 +484,7 @@ public class InstagramRipper extends AbstractJSONRipper {
return m.group(1);
}
}
logger.error("Could not find query_hash on " + jsFileURL);
LOGGER.error("Could not find query_hash on " + jsFileURL);
return null;

}
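
The tightened queryId patterns above scrape a GraphQL query hash out of Instagram's bundled JavaScript. A minimal sketch of that extraction against a made-up JS fragment (the hash value is illustrative):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class QueryHashSketch {
        public static void main(String[] args) {
            // Stand-in for the profile-page JS bundle the ripper downloads.
            String js = "byUserId.get(t))||void 0===r?void 0:r.pagination},queryId:\"472f257a40c653c64c666ce877d59d2b\",";
            Pattern jsP = Pattern.compile("byUserId\\.get\\(t\\)\\)\\|\\|void 0===r\\?void 0:r\\.pagination},queryId:.([a-zA-Z0-9]+)");
            Matcher m = jsP.matcher(js);
            if (m.find()) {
                System.out.println(m.group(1)); // 472f257a40c653c64c666ce877d59d2b
            }
        }
    }
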
@ -54,16 +54,16 @@ public class JagodibujaRipper extends AbstractHTMLRipper {
sleep(500);
Document comicPage = Http.url(comicPageUrl.attr("href")).get();
Element elem = comicPage.select("span.full-size-link > a").first();
logger.info("Got link " + elem.attr("href"));
LOGGER.info("Got link " + elem.attr("href"));
try {
addURLToDownload(new URL(elem.attr("href")), "");
} catch (MalformedURLException e) {
logger.warn("Malformed URL");
LOGGER.warn("Malformed URL");
e.printStackTrace();
}
result.add(elem.attr("href"));
} catch (IOException e) {
logger.info("Error loading " + comicPageUrl);
LOGGER.info("Error loading " + comicPageUrl);
}
}
return result;

@ -36,7 +36,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
// "url" is an instance field of the superclass
Document page = Http.url(url).get();
URL firstUrl = new URL("https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href"));
logger.info("First page is " + "https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href"));
LOGGER.info("First page is " + "https://luscious.net" + page.select("div > div.album_cover_item > a").first().attr("href"));
return Http.url(firstUrl).get();
}

@ -15,8 +15,6 @@ import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

import javax.print.Doc;

public class ManganeloRipper extends AbstractHTMLRipper {

public ManganeloRipper(URL url) throws IOException {
@ -67,7 +65,7 @@ public class ManganeloRipper extends AbstractHTMLRipper {
}

private List<String> getURLsFromChap(String url) {
logger.debug("Getting urls from " + url);
LOGGER.debug("Getting urls from " + url);
List<String> result = new ArrayList<>();
try {
Document doc = Http.url(url).get();
@ -82,7 +80,7 @@ public class ManganeloRipper extends AbstractHTMLRipper {
}

private List<String> getURLsFromChap(Document doc) {
logger.debug("Getting urls from " + url);
LOGGER.debug("Getting urls from " + url);
List<String> result = new ArrayList<>();
for (Element el : doc.select("img.img_content")) {
result.add(el.attr("src"));

@ -75,7 +75,7 @@ public class MotherlessRipper extends AlbumRipper {
if (isStopped()) {
break;
}
logger.info("Retrieving " + nextURL);
LOGGER.info("Retrieving " + nextURL);
sendUpdate(STATUS.LOADING_RESOURCE, nextURL);
Document doc = Http.url(nextURL)
.referrer("http://motherless.com")
@ -152,10 +152,10 @@ public class MotherlessRipper extends AlbumRipper {
}
addURLToDownload(new URL(file), prefix);
} else {
logger.warn("[!] could not find '__fileurl' at " + url);
LOGGER.warn("[!] could not find '__fileurl' at " + url);
}
} catch (IOException e) {
logger.error("[!] Exception while loading/parsing " + this.url, e);
LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
}
}
}

@ -99,7 +99,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Pattern p = Pattern.compile("/index.php/[a-zA-Z0-9_-]*\\?page=\\d");
Matcher m = p.matcher(nextPage);
if (m.matches()) {
nextUrl = "http://myhentaicomics.com" + m.group(0);
nextUrl = "https://myhentaicomics.com" + m.group(0);
}
if (nextUrl.equals("")) {
throw new IOException("No more pages");
@ -120,7 +120,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
if (!imageSource.startsWith("http://") && !imageSource.startsWith("https://")) {
// We replace thumbs with resizes so we can get the full-sized images
imageSource = imageSource.replace("thumbs", "resizes");
|
||||
result.add("http://myhentaicomics.com/" + imageSource);
|
||||
result.add("https://myhentaicomics.com" + imageSource);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
|
@ -109,7 +109,7 @@ public class NatalieMuRipper extends AbstractHTMLRipper {
|
||||
imgUrl = imgUrl.replace("list_thumb_inbox","xlarge");
|
||||
// Don't download the same URL twice
|
||||
if (imageURLs.contains(imgUrl)) {
|
||||
logger.debug("Already attempted: " + imgUrl);
|
||||
LOGGER.debug("Already attempted: " + imgUrl);
|
||||
continue;
|
||||
}
|
||||
imageURLs.add(imgUrl);
|
||||
|
@ -43,7 +43,7 @@ public class NewsfilterRipper extends AlbumRipper {
|
||||
public void rip() throws IOException {
|
||||
String gid = getGID(this.url);
|
||||
String theurl = "http://newsfilter.org/gallery/" + gid;
|
||||
logger.info("Loading " + theurl);
|
||||
LOGGER.info("Loading " + theurl);
|
||||
|
||||
Connection.Response resp = Jsoup.connect(theurl)
|
||||
.timeout(5000)
|
||||
|
@ -78,7 +78,7 @@ public class NfsfwRipper extends AlbumRipper {
|
||||
List<Pair> subAlbums = new ArrayList<>();
|
||||
int index = 0;
|
||||
subAlbums.add(new Pair(this.url.toExternalForm(), ""));
|
||||
while (subAlbums.size() > 0) {
|
||||
while (!subAlbums.isEmpty()) {
|
||||
if (isStopped()) {
|
||||
break;
|
||||
}
|
||||
@ -86,7 +86,7 @@ public class NfsfwRipper extends AlbumRipper {
|
||||
String nextURL = nextAlbum.first;
|
||||
String nextSubalbum = nextAlbum.second;
|
||||
sendUpdate(STATUS.LOADING_RESOURCE, nextURL);
|
||||
logger.info(" Retrieving " + nextURL);
|
||||
LOGGER.info(" Retrieving " + nextURL);
|
||||
if (albumDoc == null) {
|
||||
albumDoc = Http.url(nextURL).get();
|
||||
}
|
||||
@ -116,7 +116,7 @@ public class NfsfwRipper extends AlbumRipper {
|
||||
break;
|
||||
}
|
||||
} catch (MalformedURLException mue) {
|
||||
logger.warn("Invalid URL: " + imagePage);
|
||||
LOGGER.warn("Invalid URL: " + imagePage);
|
||||
}
|
||||
}
|
||||
if (isThisATest()) {
|
||||
@ -133,7 +133,7 @@ public class NfsfwRipper extends AlbumRipper {
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException e) {
|
||||
logger.error("Interrupted while waiting to load next page", e);
|
||||
LOGGER.error("Interrupted while waiting to load next page", e);
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
@ -167,8 +167,8 @@ public class NfsfwRipper extends AlbumRipper {
|
||||
.referrer(this.url)
|
||||
.get();
|
||||
Elements images = doc.select(".gbBlock img");
|
||||
if (images.size() == 0) {
|
||||
logger.error("Failed to find image at " + this.url);
|
||||
if (images.isEmpty()) {
|
||||
LOGGER.error("Failed to find image at " + this.url);
|
||||
return;
|
||||
}
|
||||
String file = images.first().attr("src");
|
||||
@ -181,7 +181,7 @@ public class NfsfwRipper extends AlbumRipper {
|
||||
}
|
||||
addURLToDownload(new URL(file), prefix, this.subdir);
|
||||
} catch (IOException e) {
|
||||
logger.error("[!] Exception while loading/parsing " + this.url, e);
|
||||
LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -9,7 +9,6 @@ import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
import org.jsoup.select.Elements;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
@ -20,15 +19,33 @@ import java.util.regex.Pattern;
|
||||
|
||||
public class NhentaiRipper extends AbstractHTMLRipper {
|
||||
|
||||
// All sleep times are in milliseconds
|
||||
private static final int IMAGE_SLEEP_TIME = 1500;
|
||||
|
||||
private String albumTitle;
|
||||
private Document firstPage;
|
||||
|
||||
// Thread pool for finding direct image links from "image" pages (html)
|
||||
private DownloadThreadPool nhentaiThreadPool = new DownloadThreadPool("nhentai");
|
||||
|
||||
@Override
|
||||
public boolean hasQueueSupport() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean pageContainsAlbums(URL url) {
|
||||
Pattern pa = Pattern.compile("^https?://nhentai\\.net/tag/([a-zA-Z0-9_\\-]+)/?");
|
||||
Matcher ma = pa.matcher(url.toExternalForm());
|
||||
return ma.matches();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getAlbumsToQueue(Document doc) {
|
||||
List<String> urlsToAddToQueue = new ArrayList<>();
|
||||
for (Element elem : doc.select("a.cover")) {
|
||||
urlsToAddToQueue.add("https://" + getDomain() + elem.attr("href"));
|
||||
}
|
||||
return urlsToAddToQueue;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DownloadThreadPool getThreadPool() {
|
||||
return nhentaiThreadPool;
|
||||
@ -84,7 +101,7 @@ public class NhentaiRipper extends AbstractHTMLRipper {
|
||||
if (blackListedTags == null) {
|
||||
return null;
|
||||
}
|
||||
logger.info("Blacklisted tags " + blackListedTags[0]);
|
||||
LOGGER.info("Blacklisted tags " + blackListedTags[0]);
|
||||
List<String> tagsOnPage = getTags(doc);
|
||||
for (String tag : blackListedTags) {
|
||||
for (String pageTag : tagsOnPage) {
|
||||
@ -129,84 +146,17 @@ public class NhentaiRipper extends AbstractHTMLRipper {
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document page) {
|
||||
List<String> imageURLs = new ArrayList<>();
|
||||
Elements thumbs = page.select(".gallerythumb");
|
||||
Elements thumbs = page.select("a.gallerythumb > img");
|
||||
for (Element el : thumbs) {
|
||||
String imageUrl = el.attr("href");
|
||||
imageURLs.add("https://nhentai.net" + imageUrl);
|
||||
imageURLs.add(el.attr("data-src").replaceAll("t\\.n", "i.n").replaceAll("t\\.", "."));
|
||||
}
|
||||
return imageURLs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
NHentaiImageThread t = new NHentaiImageThread(url, index, this.workingDir);
|
||||
nhentaiThreadPool.addThread(t);
|
||||
try {
|
||||
Thread.sleep(IMAGE_SLEEP_TIME);
|
||||
} catch (InterruptedException e) {
|
||||
logger.warn("Interrupted while waiting to load next image", e);
|
||||
}
|
||||
addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
|
||||
}
|
||||
|
||||
private class NHentaiImageThread extends Thread {
|
||||
|
||||
private URL url;
|
||||
private int index;
|
||||
private File workingDir;
|
||||
|
||||
NHentaiImageThread(URL url, int index, File workingDir) {
|
||||
super();
|
||||
this.url = url;
|
||||
this.index = index;
|
||||
this.workingDir = workingDir;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
fetchImage();
|
||||
}
|
||||
|
||||
private void fetchImage() {
|
||||
try {
|
||||
//Document doc = getPageWithRetries(this.url);
|
||||
Document doc = Http.url(this.url).get();
|
||||
|
||||
// Find image
|
||||
Elements images = doc.select("#image-container > a > img");
|
||||
if (images.size() == 0) {
|
||||
// Attempt to find image elsewise (Issue #41)
|
||||
images = doc.select("img#img");
|
||||
if (images.size() == 0) {
|
||||
logger.warn("Image not found at " + this.url);
|
||||
return;
|
||||
}
|
||||
}
|
||||
Element image = images.first();
|
||||
String imgsrc = image.attr("src");
|
||||
logger.info("Found URL " + imgsrc + " via " + images.get(0));
|
||||
|
||||
Pattern p = Pattern.compile("^https?://i.nhentai.net/galleries/\\d+/(.+)$");
|
||||
Matcher m = p.matcher(imgsrc);
|
||||
if (m.matches()) {
|
||||
// Manually discover filename from URL
|
||||
String savePath = this.workingDir + File.separator;
|
||||
if (Utils.getConfigBoolean("download.save_order", true)) {
|
||||
savePath += String.format("%03d_", index);
|
||||
}
|
||||
savePath += m.group(1);
|
||||
addURLToDownload(new URL(imgsrc), new File(savePath));
|
||||
} else {
|
||||
// Provide prefix and let the AbstractRipper "guess" the filename
|
||||
String prefix = "";
|
||||
if (Utils.getConfigBoolean("download.save_order", true)) {
|
||||
prefix = String.format("%03d_", index);
|
||||
}
|
||||
addURLToDownload(new URL(imgsrc), prefix);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
logger.error("[!] Exception while loading/parsing " + this.url, e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
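
The rewritten getURLsFromPage() above derives each full-size image URL from the thumbnail's data-src attribute with two replaceAll calls: the thumbnail host t.nhentai.net becomes i.nhentai.net, and the trailing "t" before the file extension is dropped. A worked example (the gallery path is made up):

    public class NhentaiUrlSketch {
        public static void main(String[] args) {
            String thumb = "https://t.nhentai.net/galleries/123456/2t.jpg";
            // Same chain the ripper applies to each data-src value.
            String full = thumb.replaceAll("t\\.n", "i.n").replaceAll("t\\.", ".");
            System.out.println(full); // https://i.nhentai.net/galleries/123456/2.jpg
        }
    }
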
@ -35,7 +35,7 @@ public class PhotobucketRipper extends AlbumRipper {
}

public URL sanitizeURL(URL url) throws MalformedURLException {
logger.info(url);
LOGGER.info(url);
String u = url.toExternalForm();
if (u.contains("?")) {
u = u.substring(0, u.indexOf("?"));
@ -54,7 +54,7 @@ public class PhotobucketRipper extends AlbumRipper {
}
Document albumDoc = pageResponse.parse();
Elements els = albumDoc.select("div.libraryTitle > h1");
if (els.size() == 0) {
if (els.isEmpty()) {
throw new IOException("Could not find libraryTitle at " + url);
}
return els.get(0).text();
@ -92,7 +92,7 @@ public class PhotobucketRipper extends AlbumRipper {
subsToRip.add(sub);
}

while (subsToRip.size() > 0 && !isStopped()) {
while (!subsToRip.isEmpty() && !isStopped()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
@ -100,12 +100,12 @@ public class PhotobucketRipper extends AlbumRipper {
}
String nextSub = subsToRip.remove(0);
rippedSubs.add(nextSub);
logger.info("Attempting to rip next subalbum: " + nextSub);
LOGGER.info("Attempting to rip next subalbum: " + nextSub);
try {
pageResponse = null;
subalbums = ripAlbumAndGetSubalbums(nextSub);
} catch (IOException e) {
logger.error("Error while ripping " + nextSub, e);
LOGGER.error("Error while ripping " + nextSub, e);
break;
}
for (String subalbum : subalbums) {
@ -131,7 +131,7 @@ public class PhotobucketRipper extends AlbumRipper {
pageIndex++;
if (pageIndex > 1 || pageResponse == null) {
url = theUrl + String.format("?sort=3&page=%d", pageIndex);
logger.info(" Retrieving " + url);
LOGGER.info(" Retrieving " + url);
pageResponse = Http.url(url).response();
}
Document albumDoc = pageResponse.parse();
@ -153,7 +153,7 @@ public class PhotobucketRipper extends AlbumRipper {
}
}
if (jsonString == null) {
logger.error("Unable to find JSON data at URL: " + url);
LOGGER.error("Unable to find JSON data at URL: " + url);
break;
}
JSONObject json = new JSONObject(jsonString);
@ -189,7 +189,7 @@ public class PhotobucketRipper extends AlbumRipper {
+ "&albumPath=" + currentAlbumPath // %2Falbums%2Fab10%2FSpazzySpizzy"
+ "&json=1";
try {
logger.info("Loading " + apiUrl);
LOGGER.info("Loading " + apiUrl);
JSONObject json = Http.url(apiUrl).getJSON();
JSONArray subalbums = json.getJSONObject("body").getJSONArray("subAlbums");
for (int i = 0; i < subalbums.length(); i++) {
@ -202,7 +202,7 @@ public class PhotobucketRipper extends AlbumRipper {
result.add(suburl);
}
} catch (IOException e) {
logger.error("Failed to get subalbums from " + apiUrl, e);
LOGGER.error("Failed to get subalbums from " + apiUrl, e);
}
return result;
}

@ -60,10 +60,7 @@ public class PichunterRipper extends AbstractHTMLRipper {
private boolean isPhotoSet(URL url) {
Pattern p = Pattern.compile("https?://www.pichunter.com/gallery/\\d+/(\\S*)/?");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return true;
}
return false;
return m.matches();
}

@Override

@ -0,0 +1,83 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class PicstatioRipper extends AbstractHTMLRipper {

public PicstatioRipper(URL url) throws IOException {
super(url);
}

private String getFullSizedImageFromURL(String fileName) {
try {
LOGGER.info("https://www.picstatio.com/wallpaper/" + fileName + "/download");
return Http.url("https://www.picstatio.com/wallpaper/" + fileName + "/download").get().select("p.text-center > span > a").attr("href");
} catch (IOException e) {
e.printStackTrace();
return null;
}
}

@Override
public String getHost() {
return "picstatio";
}

@Override
public String getDomain() {
return "picstatio.com";
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("https?://www.picstatio.com/([a-zA-Z1-9_-]*)/?$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}
throw new MalformedURLException("Expected picstatio URL format: " +
"www.picstatio.com//ID - got " + url + " instead");
}

@Override
public Document getFirstPage() throws IOException {
// "url" is an instance field of the superclass
return Http.url(url).get();
}

@Override
public Document getNextPage(Document doc) throws IOException {
if (doc.select("a.next_page") != null) {
|
||||
return Http.url("https://www.picstatio.com" + doc.select("a.next_page").attr("href")).get();
|
||||
}
|
||||
throw new IOException("No more pages");
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getURLsFromPage(Document doc) {
|
||||
List<String> result = new ArrayList<>();
|
||||
for (Element e : doc.select("img.img")) {
|
||||
String imageName = e.parent().attr("href");
|
||||
LOGGER.info(getFullSizedImageFromURL(imageName.split("/")[2]));
|
||||
result.add(getFullSizedImageFromURL(imageName.split("/")[2]));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void downloadURL(URL url, int index) {
|
||||
addURLToDownload(url, getPrefix(index));
|
||||
}
|
||||
}
|
@ -46,7 +46,7 @@ public class PornhubRipper extends AlbumRipper {
|
||||
try {
|
||||
// Attempt to use album title as GID
|
||||
if (albumDoc == null) {
|
||||
logger.info(" Retrieving " + url.toExternalForm());
|
||||
LOGGER.info(" Retrieving " + url.toExternalForm());
|
||||
sendUpdate(STATUS.LOADING_RESOURCE, url.toString());
|
||||
albumDoc = Http.url(url).get();
|
||||
}
|
||||
@ -54,7 +54,7 @@ public class PornhubRipper extends AlbumRipper {
|
||||
return HOST + "_" + elems.get(0).text();
|
||||
} catch (Exception e) {
|
||||
// Fall back to default album naming convention
|
||||
logger.warn("Failed to get album title from " + url, e);
|
||||
LOGGER.warn("Failed to get album title from " + url, e);
|
||||
}
|
||||
return super.getAlbumTitle(url);
|
||||
}
|
||||
@ -82,7 +82,7 @@ public class PornhubRipper extends AlbumRipper {
|
||||
String nextUrl = this.url.toExternalForm();
|
||||
|
||||
if (albumDoc == null) {
|
||||
logger.info(" Retrieving album page " + nextUrl);
|
||||
LOGGER.info(" Retrieving album page " + nextUrl);
|
||||
sendUpdate(STATUS.LOADING_RESOURCE, nextUrl);
|
||||
albumDoc = Http.url(nextUrl)
|
||||
.referrer(this.url)
|
||||
@ -91,9 +91,9 @@ public class PornhubRipper extends AlbumRipper {
|
||||
|
||||
// Find thumbnails
|
||||
Elements thumbs = albumDoc.select(".photoBlockBox li");
|
||||
if (thumbs.size() == 0) {
|
||||
logger.debug("albumDoc: " + albumDoc);
|
||||
logger.debug("No images found at " + nextUrl);
|
||||
if (thumbs.isEmpty()) {
|
||||
LOGGER.debug("albumDoc: " + albumDoc);
|
||||
LOGGER.debug("No images found at " + nextUrl);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -113,7 +113,7 @@ public class PornhubRipper extends AlbumRipper {
|
||||
try {
|
||||
Thread.sleep(IMAGE_SLEEP_TIME);
|
||||
} catch (InterruptedException e) {
|
||||
logger.warn("Interrupted while waiting to load next image", e);
|
||||
LOGGER.warn("Interrupted while waiting to load next image", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -155,7 +155,7 @@ public class PornhubRipper extends AlbumRipper {
|
||||
Elements images = doc.select("#photoImageSection img");
|
||||
Element image = images.first();
|
||||
String imgsrc = image.attr("src");
|
||||
logger.info("Found URL " + imgsrc + " via " + images.get(0));
|
||||
LOGGER.info("Found URL " + imgsrc + " via " + images.get(0));
|
||||
|
||||
// Provide prefix and let the AbstractRipper "guess" the filename
|
||||
String prefix = "";
|
||||
@ -167,7 +167,7 @@ public class PornhubRipper extends AlbumRipper {
|
||||
addURLToDownload(imgurl, prefix);
|
||||
|
||||
} catch (IOException e) {
|
||||
logger.error("[!] Exception while loading/parsing " + this.url, e);
|
||||
LOGGER.error("[!] Exception while loading/parsing " + this.url, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4,13 +4,11 @@ import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractRipper;
|
||||
import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
|
||||
import com.rarchives.ripme.ui.RipStatusMessage;
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONObject;
|
||||
import org.json.JSONTokener;
|
||||
@ -21,8 +19,9 @@ import com.rarchives.ripme.utils.Http;
|
||||
import com.rarchives.ripme.utils.RipUtils;
|
||||
import com.rarchives.ripme.utils.Utils;
|
||||
import org.jsoup.Jsoup;
|
||||
import org.jsoup.nodes.Document;
|
||||
import org.jsoup.nodes.Element;
|
||||
|
||||
import javax.swing.text.Document;
|
||||
import javax.swing.text.Element;
|
||||
|
||||
public class RedditRipper extends AlbumRipper {
|
||||
|
||||
@ -41,6 +40,10 @@ public class RedditRipper extends AlbumRipper {
|
||||
|
||||
private long lastRequestTime = 0;
|
||||
|
||||
private Boolean shouldAddURL() {
|
||||
return (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canRip(URL url) {
|
||||
return url.getHost().endsWith(DOMAIN);
|
||||
@ -67,6 +70,10 @@ public class RedditRipper extends AlbumRipper {
|
||||
public void rip() throws IOException {
|
||||
URL jsonURL = getJsonURL(this.url);
|
||||
while (true) {
|
||||
if (shouldAddURL()) {
|
||||
sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
|
||||
break;
|
||||
}
|
||||
jsonURL = getAndParseAndReturnNext(jsonURL);
|
||||
if (jsonURL == null || isThisATest() || isStopped()) {
|
||||
break;
|
||||
@ -110,7 +117,7 @@ public class RedditRipper extends AlbumRipper {
|
||||
try {
|
||||
Thread.sleep(2000);
|
||||
} catch (InterruptedException e) {
|
||||
logger.warn("Interrupted while sleeping", e);
|
||||
LOGGER.warn("Interrupted while sleeping", e);
|
||||
}
|
||||
return nextURL;
|
||||
}
|
||||
@ -122,7 +129,7 @@ public class RedditRipper extends AlbumRipper {
|
||||
try {
|
||||
Thread.sleep(timeDiff);
|
||||
} catch (InterruptedException e) {
|
||||
logger.warn("[!] Interrupted while waiting to load next page", e);
|
||||
LOGGER.warn("[!] Interrupted while waiting to load next page", e);
|
||||
return new JSONArray();
|
||||
}
|
||||
}
|
||||
@ -141,7 +148,7 @@ public class RedditRipper extends AlbumRipper {
|
||||
} else if (jsonObj instanceof JSONArray) {
|
||||
jsonArray = (JSONArray) jsonObj;
|
||||
} else {
|
||||
logger.warn("[!] Unable to parse JSON: " + jsonString);
|
||||
LOGGER.warn("[!] Unable to parse JSON: " + jsonString);
|
||||
}
|
||||
return jsonArray;
|
||||
}
|
||||
@ -185,6 +192,32 @@ public class RedditRipper extends AlbumRipper {
|
||||
}
|
||||
}
|
||||
|
||||
private URL parseRedditVideoMPD(String vidURL) {
|
||||
org.jsoup.nodes.Document doc = null;
|
||||
try {
|
||||
doc = Http.url(vidURL + "/DASHPlaylist.mpd").ignoreContentType().get();
|
||||
int largestHeight = 0;
|
||||
String baseURL = null;
|
||||
// Loops over all the videos and finds the one with the largest height and sets baseURL to the base url of that video
|
||||
for (org.jsoup.nodes.Element e : doc.select("MPD > Period > AdaptationSet > Representation")) {
|
||||
String height = e.attr("height");
|
||||
if (height.equals("")) {
|
||||
height = "0";
|
||||
}
|
||||
if (largestHeight < Integer.parseInt(height)) {
|
||||
largestHeight = Integer.parseInt(height);
|
||||
baseURL = doc.select("MPD > Period > AdaptationSet > Representation[height=" + height + "]").select("BaseURL").text();
|
||||
}
|
||||
LOGGER.info("H " + e.attr("height") + " V " + e.attr("width"));
|
||||
}
|
||||
return new URL(vidURL + "/" + baseURL);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
return null;
|
||||
|
||||
}
|
||||
|
||||
private void handleURL(String theUrl, String id) {
|
||||
URL originalURL;
|
||||
try {
|
||||
@ -204,6 +237,11 @@ public class RedditRipper extends AlbumRipper {
|
||||
savePath += id + "-" + m.group(1) + ".jpg";
|
||||
addURLToDownload(urls.get(0), new File(savePath));
|
||||
}
|
||||
if (url.contains("v.redd.it")) {
|
||||
String savePath = this.workingDir + File.separator;
|
||||
savePath += id + "-" + url.split("/")[3] + ".mp4";
|
||||
addURLToDownload(parseRedditVideoMPD(urls.get(0).toExternalForm()), new File(savePath));
|
||||
}
|
||||
else {
|
||||
addURLToDownload(urls.get(0), id + "-", "", theUrl, null);
|
||||
}
|
||||
|
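
parseRedditVideoMPD() above walks a v.redd.it DASHPlaylist.mpd and keeps the BaseURL of the Representation with the largest height. The same selection rule run against a hand-written playlist (real playlists carry more attributes plus an audio AdaptationSet):

    import org.jsoup.Jsoup;
    import org.jsoup.nodes.Document;
    import org.jsoup.nodes.Element;

    public class MpdPickSketch {
        public static void main(String[] args) {
            // Simplified stand-in for a v.redd.it DASH playlist.
            String mpd = "<MPD><Period><AdaptationSet>"
                    + "<Representation height=\"240\"><BaseURL>DASH_240</BaseURL></Representation>"
                    + "<Representation height=\"720\"><BaseURL>DASH_720</BaseURL></Representation>"
                    + "</AdaptationSet></Period></MPD>";
            Document doc = Jsoup.parse(mpd);
            int largestHeight = 0;
            String baseURL = null;
            for (Element rep : doc.select("MPD > Period > AdaptationSet > Representation")) {
                String h = rep.attr("height");
                int height = h.isEmpty() ? 0 : Integer.parseInt(h);
                if (height > largestHeight) {
                    largestHeight = height;
                    baseURL = rep.select("BaseURL").text();
                }
            }
            System.out.println(baseURL); // DASH_720
        }
    }
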
@ -37,10 +37,7 @@ public class Rule34Ripper extends AbstractHTMLRipper {
public boolean canRip(URL url){
Pattern p = Pattern.compile("https?://rule34.xxx/index.php\\?page=post&s=list&tags=([\\S]+)");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return true;
}
return false;
return m.matches();
}

@Override

@ -89,10 +89,10 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
String siteURL = "https://" + subDomain + "sankakucomplex.com";
// Get the page the full sized image is on
Document subPage = Http.url(siteURL + postLink).get();
logger.info("Checking page " + siteURL + postLink);
LOGGER.info("Checking page " + siteURL + postLink);
imageURLs.add("https:" + subPage.select("div[id=stats] > ul > li > a[id=highres]").attr("href"));
} catch (IOException e) {
logger.warn("Error while loading page " + postLink, e);
LOGGER.warn("Error while loading page " + postLink, e);
}
}
return imageURLs;
@ -112,7 +112,7 @@ public class SankakuComplexRipper extends AbstractHTMLRipper {
// Only logged in users can see past page 25
// Trying to rip page 26 will throw a no images found error
if (!nextPage.contains("page=26")) {
logger.info("Getting next page: " + pagination.attr("abs:next-page-url"));
LOGGER.info("Getting next page: " + pagination.attr("abs:next-page-url"));
return Http.url(pagination.attr("abs:next-page-url")).cookies(cookies).get();
}
}

@ -50,7 +50,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
Element elem = doc.select("td.style5 > a > img").last();
logger.info(elem.parent().attr("href"));
LOGGER.info(elem.parent().attr("href"));
if (elem == null || elem.parent().attr("href").equals("view.php?date=")) {
throw new IOException("No more pages");
}

@ -65,9 +65,9 @@ public class StaRipper extends AbstractHTMLRipper {
cookies.putAll(resp.cookies());
thumbPage = resp.parse();
} catch (MalformedURLException e) {
logger.info(thumbPageURL + " is a malformed URL");
LOGGER.info(thumbPageURL + " is a malformed URL");
} catch (IOException e) {
logger.info(e.getMessage());
LOGGER.info(e.getMessage());
}
String imageDownloadUrl = thumbPage.select("a.dev-page-download").attr("href");
if (imageDownloadUrl != null && !imageDownloadUrl.equals("")) {
@ -97,10 +97,10 @@ public class StaRipper extends AbstractHTMLRipper {
.followRedirects(false)
.execute();
String imageURL = response.header("Location");
logger.info(imageURL);
LOGGER.info(imageURL);
return imageURL;
} catch (IOException e) {
logger.info("Got error message " + e.getMessage() + " trying to download " + url);
LOGGER.info("Got error message " + e.getMessage() + " trying to download " + url);
return null;
}
}

@ -56,7 +56,7 @@ public class TapasticRipper extends AbstractHTMLRipper {
List<String> urls = new ArrayList<>();
String html = page.data();
if (!html.contains("episodeList : ")) {
logger.error("No 'episodeList' found at " + this.url);
LOGGER.error("No 'episodeList' found at " + this.url);
return urls;
}
String jsonString = Utils.between(html, "episodeList : ", ",\n").get(0);
@ -93,7 +93,7 @@ public class TapasticRipper extends AbstractHTMLRipper {
}
}
} catch (IOException e) {
logger.error("[!] Exception while downloading " + url, e);
LOGGER.error("[!] Exception while downloading " + url, e);
}

}

@ -69,7 +69,7 @@ public class TeenplanetRipper extends AlbumRipper {
@Override
public void rip() throws IOException {
int index = 0;
logger.info("Retrieving " + this.url);
LOGGER.info("Retrieving " + this.url);
sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
if (albumDoc == null) {
albumDoc = Http.url(url).get();

@ -59,8 +59,8 @@ public class TheyiffgalleryRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<>();
for (Element el : doc.select("ul.thumbnails > li.gdthumb")) {
String imageSource = el.select("a > img").attr("src");
for (Element el : doc.select("img.thumbnail")) {
String imageSource = el.attr("src");
imageSource = imageSource.replaceAll("_data/i", "");
imageSource = imageSource.replaceAll("-\\w\\w_\\w\\d+x\\d+", "");
result.add("https://theyiffgallery.com" + imageSource);

@ -40,7 +40,7 @@ public class TsuminoRipper extends AbstractHTMLRipper {
JSONObject json = new JSONObject(jsonInfo);
return json.getJSONArray("reader_page_urls");
} catch (IOException e) {
logger.info(e);
LOGGER.info(e);
sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED, "Unable to download album, please complete the captcha at http://www.tsumino.com/Read/Auth/"
+ getAlbumID() + " and try again");
return null;

@ -23,8 +23,8 @@ import com.rarchives.ripme.utils.Utils;
public class TumblrRipper extends AlbumRipper {

private static final String DOMAIN = "tumblr.com",
HOST = "tumblr",
IMAGE_PATTERN = "([^\\s]+(\\.(?i)(jpg|png|gif|bmp))$)";
HOST = "tumblr",
IMAGE_PATTERN = "([^\\s]+(\\.(?i)(jpg|png|gif|bmp))$)";

private enum ALBUM_TYPE {
SUBDOMAIN,
@ -37,11 +37,8 @@ public class TumblrRipper extends AlbumRipper {
private static final String TUMBLR_AUTH_CONFIG_KEY = "tumblr.auth";

private static boolean useDefaultApiKey = false; // fall-back for bad user-specified key
private static final List<String> APIKEYS = Arrays.asList("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX",
"FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4",
"qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz");
private static int genNum = new Random().nextInt(APIKEYS.size());
private static final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS
private static String API_KEY = null;

/**
* Gets the API key.
@ -49,20 +46,34 @@ public class TumblrRipper extends AlbumRipper {
* @return Tumblr API key
*/
public static String getApiKey() {
if (API_KEY == null) {
API_KEY = pickRandomApiKey();
}

if (useDefaultApiKey || Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX").equals("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX")) {
logger.info("Using api key: " + API_KEY);
LOGGER.info("Using api key: " + API_KEY);
return API_KEY;
} else {
String userDefinedAPIKey = Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX");
logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
LOGGER.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
return userDefinedAPIKey;
}

}

private static String pickRandomApiKey() {
final List<String> APIKEYS = Arrays.asList("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX",
"FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4",
"qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz");
int genNum = new Random().nextInt(APIKEYS.size());
LOGGER.info(genNum);
final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS
return API_KEY;
}

public TumblrRipper(URL url) throws IOException {
super(url);
if (API_KEY == null) {
if (getApiKey() == null) {
throw new IOException("Could not find tumblr authentication key in configuration");
}
}
@ -85,10 +96,10 @@ public class TumblrRipper extends AlbumRipper {
if (StringUtils.countMatches(u, ".") > 2) {
url = new URL(u.replace(".tumblr.com", ""));
if (isTumblrURL(url)) {
logger.info("Detected tumblr site: " + url);
LOGGER.info("Detected tumblr site: " + url);
}
else {
logger.info("Not a tumblr site: " + url);
LOGGER.info("Not a tumblr site: " + url);
}
}
return url;
@ -100,11 +111,11 @@ public class TumblrRipper extends AlbumRipper {
checkURL += "/info?api_key=" + getApiKey();
try {
JSONObject json = Http.url(checkURL)
.getJSON();
.getJSON();
int status = json.getJSONObject("meta").getInt("status");
return status == 200;
} catch (IOException e) {
logger.error("Error while checking possible tumblr domain: " + url.getHost(), e);
LOGGER.error("Error while checking possible tumblr domain: " + url.getHost(), e);
}
return false;
}
@ -139,7 +150,7 @@ public class TumblrRipper extends AlbumRipper {

String apiURL = getTumblrApiURL(mediaType, offset);
logger.info("Retrieving " + apiURL);
LOGGER.info("Retrieving " + apiURL);
sendUpdate(STATUS.LOADING_RESOURCE, apiURL);

JSONObject json = null;
@ -154,7 +165,7 @@ public class TumblrRipper extends AlbumRipper {
if (status.getStatusCode() == HttpURLConnection.HTTP_UNAUTHORIZED && !useDefaultApiKey) {
retry = true;
} else if (status.getStatusCode() == 429) {
logger.error("Tumblr rate limit has been exceeded");
LOGGER.error("Tumblr rate limit has been exceeded");
sendUpdate(STATUS.DOWNLOAD_ERRORED,"Tumblr rate limit has been exceeded");
exceededRateLimit = true;
break;
@ -167,7 +178,7 @@ public class TumblrRipper extends AlbumRipper {
String apiKey = getApiKey();

String message = "401 Unauthorized. Will retry with default Tumblr API key: " + apiKey;
logger.info(message);
LOGGER.info(message);
sendUpdate(STATUS.DOWNLOAD_WARN, message);

Utils.setConfigString(TUMBLR_AUTH_CONFIG_KEY, apiKey); // save the default key to the config
@ -175,7 +186,7 @@ public class TumblrRipper extends AlbumRipper {
// retry loading the JSON

apiURL = getTumblrApiURL(mediaType, offset);
logger.info("Retrieving " + apiURL);
LOGGER.info("Retrieving " + apiURL);
sendUpdate(STATUS.LOADING_RESOURCE, apiURL);

json = Http.url(apiURL).getJSON();
@ -184,7 +195,7 @@ public class TumblrRipper extends AlbumRipper {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
logger.error("[!] Interrupted while waiting to load next album:", e);
LOGGER.error("[!] Interrupted while waiting to load next album:", e);
break;
}

@ -213,7 +224,7 @@ public class TumblrRipper extends AlbumRipper {

posts = json.getJSONObject("response").getJSONArray("posts");
if (posts.length() == 0) {
logger.info(" Zero posts returned.");
LOGGER.info(" Zero posts returned.");
return false;
}

@ -224,8 +235,12 @@ public class TumblrRipper extends AlbumRipper {
for (int j = 0; j < photos.length(); j++) {
photo = photos.getJSONObject(j);
try {
if (Utils.getConfigBoolean("tumblr.get_raw_image", false)) {
String urlString = photo.getJSONObject("original_size").getString("url").replaceAll("https", "http");
String imageUrl = photo.getJSONObject("original_size").getString("url");
// If the URL is shorter than 65 chars we skip it, because those images don't support being grabbed in full size
if (Utils.getConfigBoolean("tumblr.get_raw_image", false) &&
|
||||
imageUrl.replaceAll("https", "http").length() > 65) {
|
||||
// We have to change the link to http because tumblr uses an invalid cert for data.tumblr.com
|
||||
String urlString = imageUrl.replaceAll("https", "http");
|
||||
urlString = urlString.replaceAll("https?://[a-sA-Z0-9_\\-\\.]*\\.tumblr", "http://data.tumblr");
|
||||
urlString = urlString.replaceAll("_\\d+\\.", "_raw.");
|
||||
fileURL = new URL(urlString);
|
||||
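
The raw-image branch above chains three rewrites: force http (data.tumblr.com serves an invalid cert over https), point the host at data.tumblr.com, and swap the size suffix for _raw. A worked example (host and filename are made up):

    public class TumblrRawUrlSketch {
        public static void main(String[] args) {
            String imageUrl = "https://68.media.tumblr.com/abcdef/tumblr_xyz_1280.jpg";
            // Same chain the ripper applies when tumblr.get_raw_image is enabled.
            String urlString = imageUrl.replaceAll("https", "http");
            urlString = urlString.replaceAll("https?://[a-sA-Z0-9_\\-\\.]*\\.tumblr", "http://data.tumblr");
            urlString = urlString.replaceAll("_\\d+\\.", "_raw.");
            System.out.println(urlString); // http://data.tumblr.com/abcdef/tumblr_xyz_raw.jpg
        }
    }
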
@ -240,16 +255,16 @@ public class TumblrRipper extends AlbumRipper {
addURLToDownload(redirectedURL);
}
} catch (Exception e) {
logger.error("[!] Error while parsing photo in " + photo, e);
LOGGER.error("[!] Error while parsing photo in " + photo, e);
}
}
} else if (post.has("video_url")) {
try {
fileURL = new URL(post.getString("video_url").replaceAll("http", "https"));
fileURL = new URL(post.getString("video_url").replaceAll("http:", "https:"));
addURLToDownload(fileURL);
} catch (Exception e) {
logger.error("[!] Error while parsing video in " + post, e);
return true;
LOGGER.error("[!] Error while parsing video in " + post, e);
return true;
}
}
if (albumType == ALBUM_TYPE.POST) {
@ -263,24 +278,24 @@ public class TumblrRipper extends AlbumRipper {
StringBuilder sb = new StringBuilder();
if (albumType == ALBUM_TYPE.POST) {
sb.append("http://api.tumblr.com/v2/blog/")
.append(subdomain)
.append("/posts?id=")
.append(postNumber)
.append("&api_key=")
.append(getApiKey());
.append(subdomain)
.append("/posts?id=")
.append(postNumber)
.append("&api_key=")
.append(getApiKey());
return sb.toString();
}
sb.append("http://api.tumblr.com/v2/blog/")
.append(subdomain)
.append("/posts/")
.append(mediaType)
.append("?api_key=")
.append(getApiKey())
.append("&offset=")
.append(offset);
.append(subdomain)
.append("/posts/")
.append(mediaType)
.append("?api_key=")
.append(getApiKey())
.append("&offset=")
.append(offset);
if (albumType == ALBUM_TYPE.TAG) {
sb.append("&tag=")
.append(tagName);
sb.append("&tag=")
.append(tagName);
}
return sb.toString();
}

@ -20,6 +20,8 @@ import com.rarchives.ripme.utils.Utils;

public class TwitterRipper extends AlbumRipper {

int downloadUrls = 1;

private static final String DOMAIN = "twitter.com",
HOST = "twitter";

@ -103,13 +105,13 @@ public class TwitterRipper extends AlbumRipper {
.getJSONObject(resource)
.getJSONObject(api);
int remaining = stats.getInt("remaining");
logger.info(" Twitter " + resource + " calls remaining: " + remaining);
LOGGER.info(" Twitter " + resource + " calls remaining: " + remaining);
if (remaining < 20) {
logger.error("Twitter API calls exhausted: " + stats.toString());
LOGGER.error("Twitter API calls exhausted: " + stats.toString());
throw new IOException("Less than 20 API calls remaining; not enough to rip.");
}
} catch (JSONException e) {
logger.error("JSONException: ", e);
LOGGER.error("JSONException: ", e);
throw new IOException("Error while parsing JSON: " + body, e);
}
}
@ -123,7 +125,7 @@ public class TwitterRipper extends AlbumRipper {
.append("&include_entities=true")
.append("&exclude_replies=true")
.append("&trim_user=true")
.append("&include_rts=false")
.append("&include_rts=true")
.append("&count=" + 200);
break;
case SEARCH:
@ -142,7 +144,7 @@ public class TwitterRipper extends AlbumRipper {

private List<JSONObject> getTweets(String url) throws IOException {
List<JSONObject> tweets = new ArrayList<>();
logger.info(" Retrieving " + url);
LOGGER.info(" Retrieving " + url);
Document doc = Http.url(url)
.ignoreContentType()
.header("Authorization", "Bearer " + accessToken)
@ -171,7 +173,7 @@ public class TwitterRipper extends AlbumRipper {
private int parseTweet(JSONObject tweet) throws MalformedURLException {
int parsedCount = 0;
if (!tweet.has("extended_entities")) {
logger.error("XXX Tweet doesn't have entitites");
LOGGER.error("XXX Tweet doesn't have entitites");
|
||||
return 0;
}

@ -187,21 +189,35 @@ public class TwitterRipper extends AlbumRipper {
url = media.getString("media_url");
if (media.getString("type").equals("video")) {
JSONArray variants = media.getJSONObject("video_info").getJSONArray("variants");
int largestBitrate = 0;
String urlToDownload = null;
// Loop over all the video options and find the biggest video
for (int j = 0; j < variants.length(); j++) {
JSONObject variant = (JSONObject) variants.get(j);
if (variant.has("bitrate") && variant.getInt("bitrate") == 832000) {
|
||||
addURLToDownload(new URL(variant.getString("url")));
|
||||
parsedCount++;
|
||||
break;
|
||||
LOGGER.info(variant);
|
||||
// If the video doesn't have a bitrate it's a m3u8 file we can't download
|
||||
if (variant.has("bitrate")) {
|
||||
if (variant.getInt("bitrate") > largestBitrate) {
|
||||
largestBitrate = variant.getInt("bitrate");
|
||||
urlToDownload = variant.getString("url");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (urlToDownload != null) {
|
||||
addURLToDownload(new URL(urlToDownload), getPrefix(downloadUrls));
|
||||
downloadUrls++;
|
||||
} else {
|
||||
LOGGER.error("URLToDownload was null");
|
||||
}
|
||||
parsedCount++;
|
||||
} else if (media.getString("type").equals("photo")) {
|
||||
if (url.contains(".twimg.com/")) {
|
||||
url += ":orig";
|
||||
addURLToDownload(new URL(url));
|
||||
addURLToDownload(new URL(url), getPrefix(downloadUrls));
|
||||
downloadUrls++;
|
||||
parsedCount++;
|
||||
} else {
|
||||
logger.debug("Unexpected media_url: " + url);
|
||||
LOGGER.debug("Unexpected media_url: " + url);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -211,6 +227,10 @@ public class TwitterRipper extends AlbumRipper {
|
||||
return parsedCount;
|
||||
}
|
||||
|
||||
public String getPrefix(int index) {
|
||||
return String.format("%03d_", index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rip() throws IOException {
|
||||
getAccessToken();
|
||||
@ -228,15 +248,15 @@ public class TwitterRipper extends AlbumRipper {
|
||||
int parsedCount = 0;
|
||||
for (int i = 0; i < MAX_REQUESTS; i++) {
|
||||
List<JSONObject> tweets = getTweets(getApiURL(lastMaxID - 1));
|
||||
if (tweets.size() == 0) {
|
||||
logger.info(" No more tweets found.");
|
||||
if (tweets.isEmpty()) {
|
||||
LOGGER.info(" No more tweets found.");
|
||||
break;
|
||||
}
|
||||
logger.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets);
|
||||
LOGGER.debug("Twitter response #" + (i + 1) + " Tweets:\n" + tweets);
|
||||
if (tweets.size() == 1 &&
|
||||
lastMaxID.equals(tweets.get(0).getString("id_str"))
|
||||
) {
|
||||
logger.info(" No more tweet found.");
|
||||
LOGGER.info(" No more tweet found.");
|
||||
break;
|
||||
}
|
||||
|
||||
@ -256,7 +276,7 @@ public class TwitterRipper extends AlbumRipper {
|
||||
try {
|
||||
Thread.sleep(WAIT_TIME);
|
||||
} catch (InterruptedException e) {
|
||||
logger.error("[!] Interrupted while waiting to load more results", e);
|
||||
LOGGER.error("[!] Interrupted while waiting to load more results", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
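A note on the variant-selection hunk above: the new loop header iterates with j over medias.length() while the body indexes variants.get(i), but the surrounding code makes the intent clear — walk the variants array and keep the highest-bitrate MP4. A minimal, self-contained sketch of that selection logic (the class name and sample JSON below are illustrative, not part of the commit):

import org.json.JSONArray;
import org.json.JSONObject;

public class LargestVariantSketch {
    public static void main(String[] args) {
        // Shape mirrors Twitter's video_info.variants; the values are made up
        JSONArray variants = new JSONArray(
                "[{\"bitrate\":320000,\"url\":\"http://example.invalid/low.mp4\"},"
                + "{\"url\":\"http://example.invalid/playlist.m3u8\"},"
                + "{\"bitrate\":832000,\"url\":\"http://example.invalid/high.mp4\"}]");
        int largestBitrate = 0;
        String urlToDownload = null;
        for (int i = 0; i < variants.length(); i++) {
            JSONObject variant = variants.getJSONObject(i);
            // Variants without a bitrate are m3u8 playlists and can't be downloaded directly
            if (variant.has("bitrate") && variant.getInt("bitrate") > largestBitrate) {
                largestBitrate = variant.getInt("bitrate");
                urlToDownload = variant.getString("url");
            }
        }
        System.out.println(urlToDownload); // prints http://example.invalid/high.mp4
    }
}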
@@ -66,7 +66,7 @@ public class TwodgalleriesRipper extends AbstractHTMLRipper {
try {
login();
} catch (IOException e) {
logger.error("Failed to login", e);
LOGGER.error("Failed to login", e);
}
String url = getURL(getGID(this.url), offset);
return Http.url(url)
@@ -82,7 +82,7 @@ public class TwodgalleriesRipper extends AbstractHTMLRipper {
Document nextDoc = Http.url(url)
.cookies(cookies)
.get();
if (nextDoc.select("div.hcaption > img").size() == 0) {
if (nextDoc.select("div.hcaption > img").isEmpty()) {
throw new IOException("No more images to retrieve");
}
return nextDoc;
@@ -43,7 +43,7 @@ public class ViewcomicRipper extends AbstractHTMLRipper {
return getHost() + "_" + title.trim();
} catch (IOException e) {
// Fall back to default album naming convention
logger.info("Unable to find title at " + url);
LOGGER.info("Unable to find title at " + url);
}
return super.getAlbumTitle(url);
}
@@ -71,7 +71,7 @@ public class VkRipper extends AlbumRipper {
String[] jsonStrings = doc.toString().split("<!>");
JSONObject json = new JSONObject(jsonStrings[jsonStrings.length - 1]);
JSONArray videos = json.getJSONArray("all");
logger.info("Found " + videos.length() + " videos");
LOGGER.info("Found " + videos.length() + " videos");
for (int i = 0; i < videos.length(); i++) {
JSONArray jsonVideo = videos.getJSONArray(i);
int vidid = jsonVideo.getInt(1);
@@ -85,7 +85,7 @@ public class VkRipper extends AlbumRipper {
try {
Thread.sleep(500);
} catch (InterruptedException e) {
logger.error("Interrupted while waiting to fetch next video URL", e);
LOGGER.error("Interrupted while waiting to fetch next video URL", e);
break;
}
}
@@ -96,7 +96,7 @@ public class VkRipper extends AlbumRipper {
Map<String,String> photoIDsToURLs = new HashMap<>();
int offset = 0;
while (true) {
logger.info(" Retrieving " + this.url);
LOGGER.info(" Retrieving " + this.url);

// al=1&offset=80&part=1
Map<String,String> postData = new HashMap<>();
@@ -119,7 +119,7 @@ public class VkRipper extends AlbumRipper {
Set<String> photoIDsToGet = new HashSet<>();
for (Element a : elements) {
if (!a.attr("onclick").contains("showPhoto('")) {
logger.error("a: " + a);
LOGGER.error("a: " + a);
continue;
}
String photoID = a.attr("onclick");
@@ -134,12 +134,12 @@ public class VkRipper extends AlbumRipper {
try {
photoIDsToURLs.putAll(getPhotoIDsToURLs(photoID));
} catch (IOException e) {
logger.error("Exception while retrieving photo id " + photoID, e);
LOGGER.error("Exception while retrieving photo id " + photoID, e);
continue;
}
}
if (!photoIDsToURLs.containsKey(photoID)) {
logger.error("Could not find URL for photo ID: " + photoID);
LOGGER.error("Could not find URL for photo ID: " + photoID);
continue;
}
String url = photoIDsToURLs.get(photoID);
@@ -2,16 +2,14 @@ package com.rarchives.ripme.ripper.rippers;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.json.JSONObject;
import org.jsoup.Jsoup;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
@@ -19,7 +17,11 @@ import org.jsoup.select.Elements;
/**
 * For ripping VSCO pictures.
 */
public class VscoRipper extends AbstractHTMLRipper{
public class VscoRipper extends AbstractHTMLRipper {

int pageNumber = 1;
JSONObject profileJSON;


private static final String DOMAIN = "vsco.co",
HOST = "vsco";
@@ -73,33 +75,23 @@ public class VscoRipper extends AbstractHTMLRipper{
try {
toRip.add(vscoImageToURL(url.toExternalForm()));
} catch (IOException ex) {
logger.debug("Failed to convert " + url.toString() + " to external form.");
LOGGER.debug("Failed to convert " + url.toString() + " to external form.");
}

} else {//want to rip a member profile
/*
String baseURL = "https://vsco.co";


//Find all the relative links, adds Base URL, then adds them to an ArrayList
List<URL> relativeLinks = new ArrayList<>();
Elements links = page.getElementsByTag("a");


for(Element link : links){
System.out.println(link.toString());
//if link includes "/media/", add it to the list
if (link.attr("href").contains("/media")) {
try {
String relativeURL = vscoImageToURL(link.attr("href"));
toRip.add(baseURL + relativeURL);
} catch (IOException ex) {
logger.debug("Could not add \"" + link.toString() + "\" to list for ripping.");
}
} else {
String username = getUserName();
String userTkn = getUserTkn(username);
String siteID = getSiteID(userTkn, username);
while (true) {
profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID);
for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) {
toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url"));
}
if (pageNumber * 1000 > profileJSON.getInt("total")) {
return toRip;
}
pageNumber++;
}
*/
logger.debug("Sorry, RipMe currently only supports ripping single images.");


}
@@ -107,6 +99,59 @@ public class VscoRipper extends AbstractHTMLRipper{
return toRip;
}

private String getUserTkn(String username) {
String userinfoPage = "https://vsco.co/content/Static/userinfo";
String referer = "https://vsco.co/" + username + "/images/1";
Map<String,String> cookies = new HashMap<>();
cookies.put("vs_anonymous_id", UUID.randomUUID().toString());
try {
Element doc = Http.url(userinfoPage).cookies(cookies).referrer(referer).ignoreContentType().get().body();
String json = doc.text().replaceAll("define\\(", "");
json = json.replaceAll("\\)", "");
return new JSONObject(json).getString("tkn");
} catch (IOException e) {
LOGGER.error("Could not get user tkn");
return null;
}
}

private String getUserName() {
Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)/images/[0-9]+");
Matcher m = p.matcher(url.toExternalForm());

if (m.matches()) {
String user = m.group(1);
return user;
}
return null;
}

private JSONObject getProfileJSON(String tkn, String username, String page, String siteId) {
String size = "1000";
String purl = "https://vsco.co/ajxp/" + tkn + "/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size;
Map<String,String> cookies = new HashMap<>();
cookies.put("vs", tkn);
try {
JSONObject j = Http.url(purl).cookies(cookies).getJSON();
return j;
} catch (IOException e) {
LOGGER.error("Could not profile images");
return null;
}
}

private String getSiteID(String tkn, String username) {
Map<String,String> cookies = new HashMap<>();
cookies.put("vs", tkn);
try {
JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON();
return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id"));
} catch (IOException e) {
LOGGER.error("Could not get site id");
return null;
}
}

private String vscoImageToURL(String url) throws IOException{
Document page = Jsoup.connect(url).userAgent(USER_AGENT)
.get();
@@ -121,14 +166,14 @@ public class VscoRipper extends AbstractHTMLRipper{
givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number)

result = givenURL;
logger.debug("Found image URL: " + givenURL);
break;//immediatly stop after getting URL (there should only be 1 image to be downloaded)
LOGGER.debug("Found image URL: " + givenURL);
break;//immediately stop after getting URL (there should only be 1 image to be downloaded)
}
}

//Means website changed, things need to be fixed.
if (result.isEmpty()){
logger.error("Could not find image URL at: " + url);
LOGGER.error("Could not find image URL at: " + url);
}

return result;
@@ -144,7 +189,7 @@ public class VscoRipper extends AbstractHTMLRipper{
public String getGID(URL url) throws MalformedURLException {

//Single Image
Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9]+)/media/([a-zA-Z0-9]+)");
Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9-]+)/media/([a-zA-Z0-9]+)");
Matcher m = p.matcher(url.toExternalForm());

if (m.matches()){
@@ -155,7 +200,7 @@ public class VscoRipper extends AbstractHTMLRipper{
}

//Member profile (Usernames should all be different, so this should work.
p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9]+)/images/[0-9]+");
p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)/images/[0-9]+");
m = p.matcher(url.toExternalForm());

if (m.matches()){
@@ -177,11 +222,6 @@ public class VscoRipper extends AbstractHTMLRipper{
return Http.url(url).get();
}

@Override
public Document getNextPage(Document doc) throws IOException {
return super.getNextPage(doc);
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
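The profile branch above replaces the commented-out HTML scraping with VSCO's paged media API: fetch page after page of up to 1000 items and stop once pageNumber * 1000 exceeds the reported total. A sketch of that loop with the HTTP call abstracted away (PageSource and the other names below are illustrative stand-ins for getProfileJSON, not the commit's API):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.json.JSONArray;
import org.json.JSONObject;

public class PagedMediaSketch {
    private static final int PAGE_SIZE = 1000; // mirrors the hard-coded size=1000

    interface PageSource {
        JSONObject fetchPage(int page) throws IOException; // hypothetical stand-in
    }

    static List<String> collectMediaUrls(PageSource source) throws IOException {
        List<String> urls = new ArrayList<>();
        int pageNumber = 1;
        while (true) {
            JSONObject profileJSON = source.fetchPage(pageNumber);
            JSONArray media = profileJSON.getJSONArray("media");
            for (int i = 0; i < media.length(); i++) {
                urls.add("https://" + media.getJSONObject(i).getString("responsive_url"));
            }
            // Stop once the pages fetched so far cover the reported total
            if (pageNumber * PAGE_SIZE > profileJSON.getInt("total")) {
                return urls;
            }
            pageNumber++;
        }
    }
}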
@@ -38,10 +38,7 @@ public class WebtoonsRipper extends AbstractHTMLRipper {
public boolean canRip(URL url) {
Pattern pat = Pattern.compile("https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*");
Matcher mat = pat.matcher(url.toExternalForm());
if (mat.matches()) {
return true;
}
return false;
return mat.matches();
}
@@ -31,7 +31,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
// http://www.konradokonski.com/wiory/comic/08182008/
// http://freeadultcomix.com/finders-feepaid-in-full-sparrow/
// http://thisis.delvecomic.com/NewWP/comic/in-too-deep/
// http://tnbtu.com/comic/01-00/
// http://shipinbottle.pepsaga.com/?p=281

private static List<String> explicit_domains = Arrays.asList(
@@ -43,7 +42,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
"www.konradokonski.com",
"freeadultcomix.com",
"thisis.delvecomic.com",
"tnbtu.com",
"shipinbottle.pepsaga.com",
"8muses.download",
"spyingwithlana.com"
@@ -56,7 +54,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
"prismblush.com",
"www.konradokonski.com",
"thisis.delvecomic.com",
"tnbtu.com",
"spyingwithlana.com"
);

@@ -137,12 +134,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
return true;
}

Pattern tnbtuPat = Pattern.compile("https?://tnbtu.com/comic/([0-9_\\-]*)/?$");
Matcher tnbtuMat = tnbtuPat.matcher(url.toExternalForm());
if (tnbtuMat.matches()) {
return true;
}

Pattern shipinbottlePat = Pattern.compile("https?://shipinbottle.pepsaga.com/\\?p=([0-9]*)/?$");
Matcher shipinbottleMat =shipinbottlePat.matcher(url.toExternalForm());
if (shipinbottleMat.matches()) {
@@ -277,12 +268,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
return getHost() + "_" + comicsxxxMat.group(1);
}

Pattern tnbtuPat = Pattern.compile("https?://tnbtu.com/comic/([0-9_\\-]*)/?$");
Matcher tnbtuMat = tnbtuPat.matcher(url.toExternalForm());
if (tnbtuMat.matches()) {
return getHost() + "_" + "The_Night_Belongs_to_Us";
}

Pattern shipinbottlePat = Pattern.compile("https?://shipinbottle.pepsaga.com/\\?p=([0-9]*)/?$");
Matcher shipinbottleMat =shipinbottlePat.matcher(url.toExternalForm());
if (shipinbottleMat.matches()) {
@@ -413,13 +398,10 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
|| getHost().contains("themonsterunderthebed.net")) {
addURLToDownload(url, pageTitle + "_");
}
if (getHost().contains("tnbtu.com")) {
// We need to set the referrer header for tnbtu
addURLToDownload(url, getPrefix(index), "","http://www.tnbtu.com/comic", null);
} else {
// If we're ripping a site where we can't get the page number/title we just rip normally
addURLToDownload(url, getPrefix(index));
}

// If we're ripping a site where we can't get the page number/title we just rip normally
addURLToDownload(url, getPrefix(index));


}
@@ -36,7 +36,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {
URLToReturn = URLToReturn.replaceAll("m.xhamster.com", "xhamster.com");
URLToReturn = URLToReturn.replaceAll("\\w\\w.xhamster.com", "xhamster.com");
URL san_url = new URL(URLToReturn.replaceAll("xhamster.com", "m.xhamster.com"));
logger.info("sanitized URL is " + san_url.toExternalForm());
LOGGER.info("sanitized URL is " + san_url.toExternalForm());
return san_url;
}

@@ -62,12 +62,9 @@ public class XhamsterRipper extends AbstractHTMLRipper {

@Override
public boolean canRip(URL url) {
Pattern p = Pattern.compile("^https?://[wmde.]*xhamster\\.com/photos/gallery/.*?(\\d+)$");
Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster\\.com/photos/gallery/.*?(\\d+)$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return true;
}
return false;
return m.matches();
}

@Override
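A quick standalone check of the revised gallery pattern. Note that the character class [\w\w] denotes the same set as [\w], so the optional group accepts any word-character subdomain prefix such as "m." or "de." (the sample URLs below are illustrative, not from the commit):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class XhamsterPatternCheck {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster\\.com/photos/gallery/.*?(\\d+)$");
        String[] samples = {
                "https://xhamster.com/photos/gallery/some-album-123",
                "https://m.xhamster.com/photos/gallery/some-album-123",
                "https://de.xhamster.com/photos/gallery/some-album-123"
        };
        for (String u : samples) {
            Matcher m = p.matcher(u);
            System.out.println(u + " -> " + m.matches()); // all three print true
        }
    }
}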
@@ -49,17 +49,16 @@ public class ZizkiRipper extends AbstractHTMLRipper {
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
// Attempt to use album title as GID
Element titleElement = getFirstPage().select("meta[name=description]").first();
String title = titleElement.attr("content");
title = title.substring(title.lastIndexOf('/') + 1);
Element titleElement = getFirstPage().select("h1.title").first();
String title = titleElement.text();

Element authorSpan = getFirstPage().select("span[class=creator]").first();
String author = authorSpan.select("a").first().text();
logger.debug("Author: " + author);
LOGGER.debug("Author: " + author);
return getHost() + "_" + author + "_" + title.trim();
} catch (IOException e) {
// Fall back to default album naming convention
logger.info("Unable to find title at " + url);
LOGGER.info("Unable to find title at " + url);
}
return super.getAlbumTitle(url);
}
@@ -78,9 +77,9 @@ public class ZizkiRipper extends AbstractHTMLRipper {
public List<String> getURLsFromPage(Document page) {
List<String> imageURLs = new ArrayList<>();
// Page contains images
logger.info("Look for images.");
LOGGER.info("Look for images.");
for (Element thumb : page.select("img")) {
logger.info("Img");
LOGGER.info("Img");
if (super.isStopped()) break;
// Find thumbnail image source
String image = null;
@@ -89,7 +88,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
if (thumb.hasAttr("typeof")) {
img_type = thumb.attr("typeof");
if (img_type.equals("foaf:Image")) {
logger.debug("Found image with " + img_type);
LOGGER.debug("Found image with " + img_type);
if (thumb.parent() != null &&
thumb.parent().parent() != null &&
thumb.parent().parent().attr("class") != null &&
@@ -97,7 +96,7 @@ public class ZizkiRipper extends AbstractHTMLRipper {
)
{
src = thumb.attr("src");
logger.debug("Found url with " + src);
LOGGER.debug("Found url with " + src);
if (!src.contains("zizki.com")) {
} else {
imageURLs.add(src.replace("/styles/medium/public/","/styles/large/public/"));
@@ -55,7 +55,7 @@ public class CliphunterRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info("Retrieving " + this.url);
LOGGER.info("Retrieving " + this.url);
String html = Http.url(url).get().html();
String jsonString = html.substring(html.indexOf("var flashVars = {d: '") + 21);
jsonString = jsonString.substring(0, jsonString.indexOf("'"));

@@ -65,14 +65,14 @@ public class GfycatRipper extends VideoRipper {
 * @throws IOException
 */
public static String getVideoURL(URL url) throws IOException {
logger.info("Retrieving " + url.toExternalForm());
LOGGER.info("Retrieving " + url.toExternalForm());

//Sanitize the URL first
url = new URL(url.toExternalForm().replace("/gifs/detail", ""));

Document doc = Http.url(url).get();
Elements videos = doc.select("source#mp4Source");
if (videos.size() == 0) {
if (videos.isEmpty()) {
throw new IOException("Could not find source#mp4source at " + url);
}
String vidUrl = videos.first().attr("src");

@@ -52,13 +52,13 @@ public class MotherlessVideoRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info(" Retrieving " + this.url);
LOGGER.info(" Retrieving " + this.url);
String html = Http.url(this.url).get().toString();
if (html.contains("__fileurl = '")) {
logger.error("WTF");
LOGGER.error("WTF");
}
List<String> vidUrls = Utils.between(html, "__fileurl = '", "';");
if (vidUrls.size() == 0) {
if (vidUrls.isEmpty()) {
throw new IOException("Could not find video URL at " + url);
}
String vidUrl = vidUrls.get(0);

@@ -54,7 +54,7 @@ public class PornhubRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info(" Retrieving " + this.url.toExternalForm());
LOGGER.info(" Retrieving " + this.url.toExternalForm());
Document doc = Http.url(this.url).get();
String html = doc.body().html();
Pattern p = Pattern.compile("^.*flashvars_[0-9]+ = (.+});.*$", Pattern.DOTALL);
@@ -81,10 +81,10 @@ public class PornhubRipper extends VideoRipper {
}
addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url));
} catch (JSONException e) {
logger.error("Error while parsing JSON at " + url, e);
LOGGER.error("Error while parsing JSON at " + url, e);
throw e;
} catch (Exception e) {
logger.error("Error while retrieving video URL at " + url, e);
LOGGER.error("Error while retrieving video URL at " + url, e);
throw new IOException(e);
}
}

@@ -55,14 +55,14 @@ public class TwitchVideoRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info("Retrieving " + this.url);
LOGGER.info("Retrieving " + this.url);
Document doc = Http.url(url).get();

//Get user friendly filename from page title
String title = doc.title();

Elements script = doc.select("script");
if (script.size() == 0) {
if (script.isEmpty()) {
throw new IOException("Could not find script code at " + url);
}
//Regex assumes highest quality source is listed first

@@ -53,10 +53,10 @@ public class ViddmeRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info(" Retrieving " + this.url.toExternalForm());
LOGGER.info(" Retrieving " + this.url.toExternalForm());
Document doc = Http.url(this.url).get();
Elements videos = doc.select("meta[name=twitter:player:stream]");
if (videos.size() == 0) {
if (videos.isEmpty()) {
throw new IOException("Could not find twitter:player:stream at " + url);
}
String vidUrl = videos.first().attr("content");

@@ -54,10 +54,10 @@ public class VidearnRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info("Retrieving " + this.url);
LOGGER.info("Retrieving " + this.url);
Document doc = Http.url(url).get();
List<String> mp4s = Utils.between(doc.html(), "file:\"", "\"");
if (mp4s.size() == 0) {
if (mp4s.isEmpty()) {
throw new IOException("Could not find files at " + url);
}
String vidUrl = mp4s.get(0);

@@ -54,10 +54,10 @@ public class VineRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info(" Retrieving " + this.url.toExternalForm());
LOGGER.info(" Retrieving " + this.url.toExternalForm());
Document doc = Http.url(this.url).get();
Elements props = doc.select("meta[property=twitter:player:stream]");
if (props.size() == 0) {
if (props.isEmpty()) {
throw new IOException("Could not find meta property 'twitter:player:stream' at " + url);
}
String vidUrl = props.get(0).attr("content");

@@ -52,7 +52,7 @@ public class VkRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info(" Retrieving " + this.url);
LOGGER.info(" Retrieving " + this.url);
String videoURL = getVideoURLAtPage(this.url.toExternalForm());
addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url));
waitForThreads();

@@ -54,10 +54,10 @@ public class XhamsterRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info("Retrieving " + this.url);
LOGGER.info("Retrieving " + this.url);
Document doc = Http.url(url).get();
Elements videos = doc.select("div.player-container > a");
if (videos.size() == 0) {
if (videos.isEmpty()) {
throw new IOException("Could not find Embed code at " + url);
}
String vidUrl = videos.attr("href");

@@ -54,12 +54,12 @@ public class XvideosRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info(" Retrieving " + this.url);
LOGGER.info(" Retrieving " + this.url);
Document doc = Http.url(this.url).get();
Elements scripts = doc.select("script");
for (Element e : scripts) {
if (e.html().contains("html5player.setVideoUrlHigh")) {
logger.info("Found the right script");
LOGGER.info("Found the right script");
String[] lines = e.html().split("\n");
for (String line: lines) {
if (line.contains("html5player.setVideoUrlHigh")) {

@@ -54,10 +54,10 @@ public class YoupornRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info(" Retrieving " + this.url);
LOGGER.info(" Retrieving " + this.url);
Document doc = Http.url(this.url).get();
Elements videos = doc.select("video");
if (videos.size() == 0) {
if (videos.isEmpty()) {
throw new IOException("Could not find Embed code at " + url);
}
Element video = videos.get(0);

@@ -55,7 +55,7 @@ public class YuvutuRipper extends VideoRipper {

@Override
public void rip() throws IOException {
logger.info("Retrieving " + this.url);
LOGGER.info("Retrieving " + this.url);
Document doc = Http.url(url).get();
Element iframe = doc.select("iframe").first();
String iframeSrc = iframe.attr("src");
@@ -65,7 +65,7 @@ public class YuvutuRipper extends VideoRipper {
throw new IOException("Could not find iframe code at " + url);
}
Elements script = doc.select("script");
if (script.size() == 0) {
if (script.isEmpty()) {
throw new IOException("Could not find script code at " + url);
}
Pattern p = Pattern.compile("file: \"(.*?)\"");
@@ -66,7 +66,7 @@ import javax.swing.UnsupportedLookAndFeelException;
 */
public final class MainWindow implements Runnable, RipStatusHandler {

private static final Logger logger = Logger.getLogger(MainWindow.class);
private static final Logger LOGGER = Logger.getLogger(MainWindow.class);

private boolean isRipping = false; // Flag to indicate if we're ripping something

@@ -115,7 +115,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
private static JButton configSaveDirButton;
private static JTextField configRetriesText;
private static JCheckBox configAutoupdateCheckbox;
private static JComboBox configLogLevelCombobox;
private static JComboBox<String> configLogLevelCombobox;
private static JCheckBox configURLHistoryCheckbox;
private static JCheckBox configPlaySound;
private static JCheckBox configSaveOrderCheckbox;
@@ -127,6 +127,10 @@ public final class MainWindow implements Runnable, RipStatusHandler {
private static JCheckBox configSaveDescriptions;
private static JCheckBox configPreferMp4;
private static JCheckBox configWindowPosition;
private static JComboBox<String> configSelectLangComboBox;
private static JLabel configThreadsLabel;
private static JLabel configTimeoutLabel;
private static JLabel configRetriesLabel;

private static TrayIcon trayIcon;
private static MenuItem trayMenuMain;
@@ -136,11 +140,11 @@ public final class MainWindow implements Runnable, RipStatusHandler {

private static AbstractRipper ripper;

private ResourceBundle rb = Utils.getResourceBundle();
private ResourceBundle rb = Utils.getResourceBundle(null);

private void updateQueueLabel() {
if (queueListModel.size() > 0) {
optionQueue.setText( rb.getString("Queue") + " (" + queueListModel.size() + ")");
optionQueue.setText(rb.getString("Queue") + " (" + queueListModel.size() + ")");
} else {
optionQueue.setText(rb.getString("Queue"));
}
@@ -194,7 +198,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
if (!configurationPanel.isVisible()) {
optionConfiguration.doClick();
}
Runnable r = () -> UpdateUtils.updateProgram(configUpdateLabel);
Runnable r = () -> UpdateUtils.updateProgramGUI(configUpdateLabel);
new Thread(r).start();
}

@@ -275,7 +279,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch (ClassNotFoundException | InstantiationException | UnsupportedLookAndFeelException | IllegalAccessException e) {
logger.error("[!] Exception setting system theme:", e);
LOGGER.error("[!] Exception setting system theme:", e);
}

ripTextfield = new JTextField("", 20);
@@ -471,9 +475,9 @@ public final class MainWindow implements Runnable, RipStatusHandler {
// TODO Configuration components
configUpdateButton = new JButton(rb.getString("check.for.updates"));
configUpdateLabel = new JLabel( rb.getString("current.version") + ": " + UpdateUtils.getThisJarVersion(), JLabel.RIGHT);
JLabel configThreadsLabel = new JLabel(rb.getString("max.download.threads") + ":", JLabel.RIGHT);
JLabel configTimeoutLabel = new JLabel(rb.getString("timeout.mill"), JLabel.RIGHT);
JLabel configRetriesLabel = new JLabel(rb.getString("retry.download.count"), JLabel.RIGHT);
configThreadsLabel = new JLabel(rb.getString("max.download.threads") + ":", JLabel.RIGHT);
configTimeoutLabel = new JLabel(rb.getString("timeout.mill"), JLabel.RIGHT);
configRetriesLabel = new JLabel(rb.getString("retry.download.count"), JLabel.RIGHT);
configThreadsText = new JTextField(Integer.toString(Utils.getConfigInteger("threads.size", 3)));
configTimeoutText = new JTextField(Integer.toString(Utils.getConfigInteger("download.timeout", 60000)));
configRetriesText = new JTextField(Integer.toString(Utils.getConfigInteger("download.retries", 3)));
@@ -491,7 +495,8 @@ public final class MainWindow implements Runnable, RipStatusHandler {
configWindowPosition = addNewCheckbox(rb.getString("restore.window.position"), "window.position", true);
configURLHistoryCheckbox = addNewCheckbox(rb.getString("remember.url.history"), "remember.url_history", true);

configLogLevelCombobox = new JComboBox(new String[] {"Log level: Error", "Log level: Warn", "Log level: Info", "Log level: Debug"});
configLogLevelCombobox = new JComboBox<>(new String[] {"Log level: Error", "Log level: Warn", "Log level: Info", "Log level: Debug"});
configSelectLangComboBox = new JComboBox<>(new String[] {"en_US", "de_DE", "es_ES", "fr_CH", "kr_KR", "pt_PT", "fi_FI", "in_ID", "porrisavvo_FI"});
configLogLevelCombobox.setSelectedItem(Utils.getConfigString("log.level", "Log level: Debug"));
setLogLevel(configLogLevelCombobox.getSelectedItem().toString());
configSaveDirLabel = new JLabel();
@@ -504,30 +509,22 @@ public final class MainWindow implements Runnable, RipStatusHandler {
configSaveDirLabel.setToolTipText(configSaveDirLabel.getText());
configSaveDirLabel.setHorizontalAlignment(JLabel.RIGHT);
configSaveDirButton = new JButton("Select Save Directory...");
gbc.gridy = 0; gbc.gridx = 0; configurationPanel.add(configUpdateLabel, gbc);
gbc.gridx = 1; configurationPanel.add(configUpdateButton, gbc);
gbc.gridy = 1; gbc.gridx = 0; configurationPanel.add(configAutoupdateCheckbox, gbc);
gbc.gridx = 1; configurationPanel.add(configLogLevelCombobox, gbc);
gbc.gridy = 2; gbc.gridx = 0; configurationPanel.add(configThreadsLabel, gbc);
gbc.gridx = 1; configurationPanel.add(configThreadsText, gbc);
gbc.gridy = 3; gbc.gridx = 0; configurationPanel.add(configTimeoutLabel, gbc);
gbc.gridx = 1; configurationPanel.add(configTimeoutText, gbc);
gbc.gridy = 4; gbc.gridx = 0; configurationPanel.add(configRetriesLabel, gbc);
gbc.gridx = 1; configurationPanel.add(configRetriesText, gbc);
gbc.gridy = 5; gbc.gridx = 0; configurationPanel.add(configOverwriteCheckbox, gbc);
gbc.gridx = 1; configurationPanel.add(configSaveOrderCheckbox, gbc);
gbc.gridy = 6; gbc.gridx = 0; configurationPanel.add(configPlaySound, gbc);
gbc.gridx = 1; configurationPanel.add(configSaveLogs, gbc);
gbc.gridy = 7; gbc.gridx = 0; configurationPanel.add(configShowPopup, gbc);
gbc.gridx = 1; configurationPanel.add(configSaveURLsOnly, gbc);
gbc.gridy = 8; gbc.gridx = 0; configurationPanel.add(configClipboardAutorip, gbc);
gbc.gridx = 1; configurationPanel.add(configSaveAlbumTitles, gbc);
gbc.gridy = 9; gbc.gridx = 0; configurationPanel.add(configSaveDescriptions, gbc);
gbc.gridx = 1; configurationPanel.add(configPreferMp4, gbc);
gbc.gridy = 10; gbc.gridx = 0; configurationPanel.add(configWindowPosition, gbc);
gbc.gridx = 1; configurationPanel.add(configURLHistoryCheckbox, gbc);
gbc.gridy = 11; gbc.gridx = 0; configurationPanel.add(configSaveDirLabel, gbc);
gbc.gridx = 1; configurationPanel.add(configSaveDirButton, gbc);

addItemToConfigGridBagConstraints(gbc, 0, configUpdateLabel, configUpdateButton);
addItemToConfigGridBagConstraints(gbc, 1, configAutoupdateCheckbox, configLogLevelCombobox);
addItemToConfigGridBagConstraints(gbc, 2, configThreadsLabel, configThreadsText);
addItemToConfigGridBagConstraints(gbc, 3, configTimeoutLabel, configTimeoutText);
addItemToConfigGridBagConstraints(gbc, 4, configRetriesLabel, configRetriesText);
addItemToConfigGridBagConstraints(gbc, 5, configOverwriteCheckbox, configSaveOrderCheckbox);
addItemToConfigGridBagConstraints(gbc, 6, configPlaySound, configSaveLogs);
addItemToConfigGridBagConstraints(gbc, 7, configShowPopup, configSaveURLsOnly);
addItemToConfigGridBagConstraints(gbc, 8, configClipboardAutorip, configSaveAlbumTitles);
addItemToConfigGridBagConstraints(gbc, 9, configSaveDescriptions, configPreferMp4);
addItemToConfigGridBagConstraints(gbc, 10, configWindowPosition, configURLHistoryCheckbox);
addItemToConfigGridBagConstraints(gbc, 11, configSelectLangComboBox);
addItemToConfigGridBagConstraints(gbc, 12, configSaveDirLabel, configSaveDirButton);


emptyPanel = new JPanel();
@@ -550,6 +547,56 @@ public final class MainWindow implements Runnable, RipStatusHandler {
gbc.fill = GridBagConstraints.HORIZONTAL;
}

private void addItemToConfigGridBagConstraints(GridBagConstraints gbc, int gbcYValue, JLabel thing1ToAdd, JButton thing2ToAdd ) {
gbc.gridy = gbcYValue; gbc.gridx = 0; configurationPanel.add(thing1ToAdd, gbc);
gbc.gridx = 1; configurationPanel.add(thing2ToAdd, gbc);
}

private void addItemToConfigGridBagConstraints(GridBagConstraints gbc, int gbcYValue, JLabel thing1ToAdd, JTextField thing2ToAdd ) {
gbc.gridy = gbcYValue; gbc.gridx = 0; configurationPanel.add(thing1ToAdd, gbc);
gbc.gridx = 1; configurationPanel.add(thing2ToAdd, gbc);
}

private void addItemToConfigGridBagConstraints(GridBagConstraints gbc, int gbcYValue, JCheckBox thing1ToAdd, JCheckBox thing2ToAdd ) {
gbc.gridy = gbcYValue; gbc.gridx = 0; configurationPanel.add(thing1ToAdd, gbc);
gbc.gridx = 1; configurationPanel.add(thing2ToAdd, gbc);
}

private void addItemToConfigGridBagConstraints(GridBagConstraints gbc, int gbcYValue, JCheckBox thing1ToAdd, JComboBox thing2ToAdd ) {
gbc.gridy = gbcYValue; gbc.gridx = 0; configurationPanel.add(thing1ToAdd, gbc);
gbc.gridx = 1; configurationPanel.add(thing2ToAdd, gbc);
}

private void addItemToConfigGridBagConstraints(GridBagConstraints gbc, int gbcYValue, JComboBox thing1ToAdd ) {
gbc.gridy = gbcYValue; gbc.gridx = 0; configurationPanel.add(thing1ToAdd, gbc);
}

private void changeLocale() {
statusLabel.setText(rb.getString("inactive"));
configUpdateButton.setText(rb.getString("check.for.updates"));
configUpdateLabel.setText(rb.getString("current.version") + ": " + UpdateUtils.getThisJarVersion());
configThreadsLabel.setText(rb.getString("max.download.threads"));
configTimeoutLabel.setText(rb.getString("timeout.mill"));
configRetriesLabel.setText(rb.getString("retry.download.count"));
configOverwriteCheckbox.setText(rb.getString("overwrite.existing.files"));
configAutoupdateCheckbox.setText(rb.getString("auto.update"));
configPlaySound.setText(rb.getString("sound.when.rip.completes"));
configShowPopup.setText(rb.getString("notification.when.rip.starts"));
configSaveOrderCheckbox.setText(rb.getString("preserve.order"));
configSaveLogs.setText(rb.getString("save.logs"));
configSaveURLsOnly.setText(rb.getString("save.urls.only"));
configSaveAlbumTitles.setText(rb.getString("save.album.titles"));
configClipboardAutorip.setText(rb.getString("autorip.from.clipboard"));
configSaveDescriptions.setText(rb.getString("save.descriptions"));
configPreferMp4.setText(rb.getString("prefer.mp4.over.gif"));
configWindowPosition.setText(rb.getString("restore.window.position"));
configURLHistoryCheckbox.setText(rb.getString("remember.url.history"));
optionLog.setText(rb.getString("Log"));
optionHistory.setText(rb.getString("History"));
optionQueue.setText(rb.getString("Queue"));
optionConfiguration.setText(rb.getString("Configuration"));
}

private void setupHandlers() {
ripButton.addActionListener(new RipButtonHandler());
ripTextfield.addActionListener(new RipButtonHandler());
@@ -739,13 +786,18 @@ public final class MainWindow implements Runnable, RipStatusHandler {
}
});
configUpdateButton.addActionListener(arg0 -> {
Thread t = new Thread(() -> UpdateUtils.updateProgram(configUpdateLabel));
Thread t = new Thread(() -> UpdateUtils.updateProgramGUI(configUpdateLabel));
t.start();
});
configLogLevelCombobox.addActionListener(arg0 -> {
String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString();
setLogLevel(level);
});
configSelectLangComboBox.addActionListener(arg0 -> {
String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString();
rb = Utils.getResourceBundle(level);
changeLocale();
});
configSaveDirLabel.addMouseListener(new MouseAdapter() {
@Override
public void mouseClicked(MouseEvent e) {
@@ -769,7 +821,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
try {
chosenPath = chosenFile.getCanonicalPath();
} catch (Exception e) {
logger.error("Error while getting selected path: ", e);
LOGGER.error("Error while getting selected path: ", e);
return;
}
configSaveDirLabel.setText(Utils.shortenPath(chosenPath));
@@ -825,7 +877,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
break;
}
Logger.getRootLogger().setLevel(newLevel);
logger.setLevel(newLevel);
LOGGER.setLevel(newLevel);
ConsoleAppender ca = (ConsoleAppender)Logger.getRootLogger().getAppender("stdout");
if (ca != null) {
ca.setThreshold(newLevel);
@@ -897,9 +949,9 @@ public final class MainWindow implements Runnable, RipStatusHandler {
new ImageIcon(mainIcon));
if (response == JOptionPane.YES_OPTION) {
try {
Desktop.getDesktop().browse(URI.create("http://github.com/4pr0n/ripme"));
Desktop.getDesktop().browse(URI.create("http://github.com/ripmeapp/ripme"));
} catch (IOException e) {
logger.error("Exception while opening project home page", e);
LOGGER.error("Exception while opening project home page", e);
}
}
});
@@ -972,10 +1024,10 @@ public final class MainWindow implements Runnable, RipStatusHandler {
HISTORY.clear();
if (historyFile.exists()) {
try {
logger.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
LOGGER.info(rb.getString("loading.history.from") + " " + historyFile.getCanonicalPath());
HISTORY.fromFile(historyFile.getCanonicalPath());
} catch (IOException e) {
logger.error("Failed to load history from file " + historyFile, e);
LOGGER.error("Failed to load history from file " + historyFile, e);
JOptionPane.showMessageDialog(null,
"RipMe failed to load the history file at " + historyFile.getAbsolutePath() + "\n\n" +
"Error: " + e.getMessage() + "\n\n" +
@@ -985,9 +1037,9 @@ public final class MainWindow implements Runnable, RipStatusHandler {
JOptionPane.ERROR_MESSAGE);
}
} else {
logger.info(rb.getString("loading.history.from.configuration"));
LOGGER.info(rb.getString("loading.history.from.configuration"));
HISTORY.fromList(Utils.getConfigList("download.history"));
if (HISTORY.toList().size() == 0) {
if (HISTORY.toList().isEmpty()) {
// Loaded from config, still no entries.
// Guess rip history based on rip folder
String[] dirs = Utils.getWorkingDirectory().list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory());
@@ -1015,7 +1067,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
HISTORY.toFile(historyFile.toString());
Utils.setConfigList("download.history", Collections.emptyList());
} catch (IOException e) {
logger.error("Failed to save history to file " + historyFile, e);
LOGGER.error("Failed to save history to file " + historyFile, e);
}
}

@@ -1037,7 +1089,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
logger.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
LOGGER.error(rb.getString("interrupted.while.waiting.to.rip.next.album"), ie);
}
ripNextAlbum();
} else {
@@ -1061,7 +1113,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
try {
url = new URL(urlString);
} catch (MalformedURLException e) {
logger.error("[!] Could not generate URL for '" + urlString + "'", e);
LOGGER.error("[!] Could not generate URL for '" + urlString + "'", e);
error("Given URL is not valid, expecting http://website.com/page/...");
return null;
}
@@ -1076,7 +1128,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
ripper.setup();
} catch (Exception e) {
failed = true;
logger.error("Could not find ripper for URL " + url, e);
LOGGER.error("Could not find ripper for URL " + url, e);
error(e.getMessage());
}
if (!failed) {
@@ -1094,7 +1146,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
}
return t;
} catch (Exception e) {
logger.error("[!] Error while ripping: " + e.getMessage(), e);
LOGGER.error("[!] Error while ripping: " + e.getMessage(), e);
error("Unable to rip this URL: " + e.getMessage());
}
}
@@ -1145,28 +1197,34 @@ public final class MainWindow implements Runnable, RipStatusHandler {
switch(msg.getStatus()) {
case LOADING_RESOURCE:
case DOWNLOAD_STARTED:
if (logger.isEnabledFor(Level.INFO)) {
if (LOGGER.isEnabledFor(Level.INFO)) {
appendLog("Downloading " + msg.getObject(), Color.BLACK);
}
break;
case DOWNLOAD_COMPLETE:
if (logger.isEnabledFor(Level.INFO)) {
if (LOGGER.isEnabledFor(Level.INFO)) {
appendLog("Downloaded " + msg.getObject(), Color.GREEN);
}
break;
case DOWNLOAD_COMPLETE_HISTORY:
if (LOGGER.isEnabledFor(Level.INFO)) {
appendLog("" + msg.getObject(), Color.GREEN);
}
break;

case DOWNLOAD_ERRORED:
if (logger.isEnabledFor(Level.ERROR)) {
if (LOGGER.isEnabledFor(Level.ERROR)) {
appendLog((String) msg.getObject(), Color.RED);
}
break;
case DOWNLOAD_WARN:
if (logger.isEnabledFor(Level.WARN)) {
if (LOGGER.isEnabledFor(Level.WARN)) {
appendLog((String) msg.getObject(), Color.ORANGE);
}
break;

case RIP_ERRORED:
if (logger.isEnabledFor(Level.ERROR)) {
if (LOGGER.isEnabledFor(Level.ERROR)) {
appendLog((String) msg.getObject(), Color.RED);
}
stopButton.setEnabled(false);
@@ -1218,7 +1276,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
try {
Desktop.getDesktop().open(new File(event.getActionCommand()));
} catch (Exception e) {
logger.error(e);
LOGGER.error(e);
}
});
pack();
@@ -1289,7 +1347,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
Utils.setConfigInteger("window.y", y);
Utils.setConfigInteger("window.w", w);
Utils.setConfigInteger("window.h", h);
logger.debug("Saved window position (x=" + x + ", y=" + y + ", w=" + w + ", h=" + h + ")");
LOGGER.debug("Saved window position (x=" + x + ", y=" + y + ", w=" + w + ", h=" + h + ")");
}

private static void restoreWindowPosition(Frame frame) {
@@ -1304,7 +1362,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
int w = Utils.getConfigInteger("window.w", -1);
int h = Utils.getConfigInteger("window.h", -1);
if (x < 0 || y < 0 || w <= 0 || h <= 0) {
logger.debug("UNUSUAL: One or more of: x, y, w, or h was still less than 0 after reading config");
LOGGER.debug("UNUSUAL: One or more of: x, y, w, or h was still less than 0 after reading config");
mainFrame.setLocationRelativeTo(null); // default to middle of screen
return;
}
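The five addItemToConfigGridBagConstraints overloads introduced above differ only in the declared Swing component types; since JLabel, JButton, JCheckBox, JTextField and JComboBox all extend JComponent, a single varargs helper would cover every row. A sketch of that alternative (an editorial illustration, not the committed code):

import java.awt.GridBagConstraints;
import javax.swing.JComponent;
import javax.swing.JPanel;

public class GridBagRowSketch {
    // Places each component on row `row`, in columns 0..n, using the shared constraints
    static void addRow(JPanel panel, GridBagConstraints gbc, int row, JComponent... items) {
        gbc.gridy = row;
        for (int col = 0; col < items.length; col++) {
            gbc.gridx = col;
            panel.add(items[col], gbc);
        }
    }
}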
@@ -10,6 +10,7 @@ public class RipStatusMessage {
DOWNLOAD_STARTED("Download Started"),
DOWNLOAD_COMPLETE("Download Complete"),
DOWNLOAD_ERRORED("Download Errored"),
DOWNLOAD_COMPLETE_HISTORY("Download Complete History"),
RIP_COMPLETE("Rip Complete"),
DOWNLOAD_WARN("Download problem"),
TOTAL_BYTES("Total bytes"),
@@ -1,13 +1,12 @@
package com.rarchives.ripme.ui;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.*;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.xml.bind.annotation.adapters.HexBinaryAdapter;

import org.apache.log4j.Logger;
import org.json.JSONArray;
@@ -21,11 +20,12 @@ import com.rarchives.ripme.utils.Utils;
public class UpdateUtils {

private static final Logger logger = Logger.getLogger(UpdateUtils.class);
private static final String DEFAULT_VERSION = "1.7.47";
private static final String DEFAULT_VERSION = "1.7.55";
private static final String REPO_NAME = "ripmeapp/ripme";
private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
private static final String mainFileName = "ripme.jar";
private static final String updateFileName = "ripme.jar.update";
private static JSONObject ripmeJson;

private static String getUpdateJarURL(String latestVersion) {
return "https://github.com/" + REPO_NAME + "/releases/download/" + latestVersion + "/ripme.jar";
@@ -40,7 +40,65 @@ public class UpdateUtils {
return thisVersion;
}

public static void updateProgram(JLabel configUpdateLabel) {
private static String getChangeList(JSONObject rj) {
JSONArray jsonChangeList = rj.getJSONArray("changeList");
StringBuilder changeList = new StringBuilder();
for (int i = 0; i < jsonChangeList.length(); i++) {
String change = jsonChangeList.getString(i);
if (change.startsWith(UpdateUtils.getThisJarVersion() + ":")) {
break;
}
changeList.append("\n").append(change);
}
return changeList.toString();
}

public static void updateProgramCLI() {
logger.info("Checking for update...");

Document doc = null;
try {
logger.debug("Retrieving " + UpdateUtils.updateJsonURL);
doc = Jsoup.connect(UpdateUtils.updateJsonURL)
.timeout(10 * 1000)
.ignoreContentType(true)
.get();
} catch (IOException e) {
logger.error("Error while fetching update: ", e);
JOptionPane.showMessageDialog(null,
"<html><font color=\"red\">Error while fetching update: " + e.getMessage() + "</font></html>",
"RipMe Updater",
JOptionPane.ERROR_MESSAGE);
return;
} finally {
logger.info("Current version: " + getThisJarVersion());
}
String jsonString = doc.body().html().replaceAll("&quot;", "\"");
ripmeJson = new JSONObject(jsonString);

String changeList = getChangeList(ripmeJson);

logger.info("Change log: \n" + changeList);

String latestVersion = ripmeJson.getString("latestVersion");
if (UpdateUtils.isNewerVersion(latestVersion)) {
logger.info("Found newer version: " + latestVersion);
logger.info("Downloading new version...");
logger.info("New version found, downloading...");
try {
UpdateUtils.downloadJarAndLaunch(getUpdateJarURL(latestVersion), false);
} catch (IOException e) {
logger.error("Error while updating: ", e);
}
} else {
logger.debug("This version (" + UpdateUtils.getThisJarVersion() +
") is the same or newer than the website's version (" + latestVersion + ")");
logger.info("v" + UpdateUtils.getThisJarVersion() + " is the latest version");
logger.debug("Running latest version: " + UpdateUtils.getThisJarVersion());
}
}

public static void updateProgramGUI(JLabel configUpdateLabel) {
configUpdateLabel.setText("Checking for update...");

Document doc = null;
@@ -61,24 +119,17 @@ public class UpdateUtils {
configUpdateLabel.setText("Current version: " + getThisJarVersion());
}
String jsonString = doc.body().html().replaceAll("&quot;", "\"");
JSONObject json = new JSONObject(jsonString);
JSONArray jsonChangeList = json.getJSONArray("changeList");
StringBuilder changeList = new StringBuilder();
for (int i = 0; i < jsonChangeList.length(); i++) {
String change = jsonChangeList.getString(i);
if (change.startsWith(UpdateUtils.getThisJarVersion() + ":")) {
break;
}
changeList.append("<br> + ").append(change);
}
ripmeJson = new JSONObject(jsonString);

String latestVersion = json.getString("latestVersion");
String changeList = getChangeList(ripmeJson);

String latestVersion = ripmeJson.getString("latestVersion");
if (UpdateUtils.isNewerVersion(latestVersion)) {
logger.info("Found newer version: " + latestVersion);
int result = JOptionPane.showConfirmDialog(
null,
"<html><font color=\"green\">New version (" + latestVersion + ") is available!</font>"
+ "<br><br>Recent changes:" + changeList.toString()
+ "<br><br>Recent changes:" + changeList
+ "<br><br>Do you want to download and run the newest version?</html>",
"RipMe Updater",
JOptionPane.YES_NO_OPTION);
@@ -90,7 +141,7 @@ public class UpdateUtils {
configUpdateLabel.setText("<html><font color=\"green\">Downloading new version...</font></html>");
logger.info("New version found, downloading...");
try {
UpdateUtils.downloadJarAndLaunch(getUpdateJarURL(latestVersion));
UpdateUtils.downloadJarAndLaunch(getUpdateJarURL(latestVersion), true);
} catch (IOException e) {
JOptionPane.showMessageDialog(null,
"Error while updating: " + e.getMessage(),
@@ -108,6 +159,11 @@ public class UpdateUtils {
}

private static boolean isNewerVersion(String latestVersion) {
// If we're testing the update utils we want the program to always try to update
if (Utils.getConfigBoolean("testing.always_try_to_update", false)) {
logger.info("isNewerVersion is returning true because the key \"testing.always_try_to_update\" is true");
return true;
}
int[] oldVersions = versionStringToInt(getThisJarVersion());
int[] newVersions = versionStringToInt(latestVersion);
if (oldVersions.length < newVersions.length) {
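The isNewerVersion hunk above is truncated after the length check, so the component-wise comparison itself is not shown. A sketch of one standard approach consistent with versionStringToInt's int arrays (an illustration of the idea, not necessarily the committed implementation):

public class VersionCompareSketch {
    static boolean isNewer(int[] oldVersions, int[] newVersions) {
        int len = Math.min(oldVersions.length, newVersions.length);
        for (int i = 0; i < len; i++) {
            if (newVersions[i] > oldVersions[i]) {
                return true;  // e.g. 1.7.55 beats 1.7.47 at the patch slot
            }
            if (newVersions[i] < oldVersions[i]) {
                return false;
            }
        }
        // Equal common prefix: the longer version string (e.g. 1.7.1 vs 1.7) wins
        return newVersions.length > oldVersions.length;
    }

    public static void main(String[] args) {
        System.out.println(isNewer(new int[]{1, 7, 47}, new int[]{1, 7, 55})); // prints true
    }
}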
@ -141,7 +197,32 @@ public class UpdateUtils {
|
||||
return intVersions;
|
||||
}
|
||||
|
||||
private static void downloadJarAndLaunch(String updateJarURL)
|
||||
// Code take from https://stackoverflow.com/a/30925550
|
||||
private static String createSha256(File file) {
|
||||
try {
|
||||
MessageDigest digest = MessageDigest.getInstance("SHA-256");
|
||||
InputStream fis = new FileInputStream(file);
|
||||
int n = 0;
|
||||
byte[] buffer = new byte[8192];
|
||||
while (n != -1) {
|
||||
n = fis.read(buffer);
|
||||
if (n > 0) {
|
||||
digest.update(buffer, 0, n);
|
||||
}
|
||||
}
|
||||
// As patch.py writes the hash in lowercase this must return the has in lowercase
|
||||
return new HexBinaryAdapter().marshal(digest.digest()).toLowerCase();
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
logger.error("Got error getting file hash " + e.getMessage());
|
||||
} catch (FileNotFoundException e) {
|
||||
logger.error("Could not find file: " + file.getName());
|
||||
} catch (IOException e) {
|
||||
logger.error("Got error getting file hash " + e.getMessage());
|
||||
}
|
||||
return null;
|
||||
}

private static void downloadJarAndLaunch(String updateJarURL, Boolean shouldLaunch)
throws IOException {
Response response;
response = Jsoup.connect(updateJarURL)
@@ -149,60 +230,80 @@ public class UpdateUtils {
.timeout(Utils.getConfigInteger("download.timeout", 60 * 1000))
.maxBodySize(1024 * 1024 * 100)
.execute();
FileOutputStream out = new FileOutputStream(updateFileName);
out.write(response.bodyAsBytes());
out.close();
logger.info("Download of new version complete; saved to " + updateFileName);

// Setup updater script
final String batchFile, script;
final String[] batchExec;
String os = System.getProperty("os.name").toLowerCase();
if (os.contains("win")) {
// Windows
batchFile = "update_ripme.bat";
String batchPath = new File(batchFile).getAbsolutePath();
script = "@echo off\r\n"
+ "timeout 1" + "\r\n"
+ "copy " + updateFileName + " " + mainFileName + "\r\n"
+ "del " + updateFileName + "\r\n"
+ "ripme.jar" + "\r\n"
+ "del " + batchPath + "\r\n";
batchExec = new String[] { batchPath };
try (FileOutputStream out = new FileOutputStream(updateFileName)) {
out.write(response.bodyAsBytes());
}
// Only check the hash if the user hasn't disabled hash checking
if (Utils.getConfigBoolean("security.check_update_hash", true)) {
String updateHash = createSha256(new File(updateFileName));
logger.info("Download of new version complete; saved to " + updateFileName);
logger.info("Checking hash of update");

}
else {
// Mac / Linux
batchFile = "update_ripme.sh";
String batchPath = new File(batchFile).getAbsolutePath();
script = "#!/bin/sh\n"
+ "sleep 1" + "\n"
+ "cd " + new File(mainFileName).getAbsoluteFile().getParent() + "\n"
+ "cp -f " + updateFileName + " " + mainFileName + "\n"
+ "rm -f " + updateFileName + "\n"
+ "java -jar \"" + new File(mainFileName).getAbsolutePath() + "\" &\n"
+ "sleep 1" + "\n"
+ "rm -f " + batchPath + "\n";
batchExec = new String[] { "sh", batchPath };
}
// Create updater script
BufferedWriter bw = new BufferedWriter(new FileWriter(batchFile));
bw.write(script);
bw.flush();
bw.close();
logger.info("Saved update script to " + batchFile);
// Run updater script on exit
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
try {
logger.info("Executing: " + batchFile);
Runtime.getRuntime().exec(batchExec);
} catch (IOException e) {
// TODO: implement proper error handling; this is intended as a placeholder
e.printStackTrace();
if (!ripmeJson.getString("currentHash").equals(updateHash)) {
logger.error("Error: Update has bad hash");
logger.debug("Expected hash: " + ripmeJson.getString("currentHash"));
logger.debug("Actual hash: " + updateHash);
throw new IOException("Got bad file hash");
} else {
logger.info("Hash is good");
}
}));
logger.info("Exiting older version, should execute update script (" + batchFile + ") during exit");
System.exit(0);
}
if (shouldLaunch) {
// Setup updater script
final String batchFile, script;
final String[] batchExec;
String os = System.getProperty("os.name").toLowerCase();
if (os.contains("win")) {
// Windows
batchFile = "update_ripme.bat";
String batchPath = new File(batchFile).getAbsolutePath();
script = "@echo off\r\n"
+ "timeout 1" + "\r\n"
+ "copy " + updateFileName + " " + mainFileName + "\r\n"
+ "del " + updateFileName + "\r\n"
+ "ripme.jar" + "\r\n"
+ "del " + batchPath + "\r\n";
batchExec = new String[]{batchPath};

} else {
// Mac / Linux
batchFile = "update_ripme.sh";
String batchPath = new File(batchFile).getAbsolutePath();
script = "#!/bin/sh\n"
+ "sleep 1" + "\n"
+ "cd " + new File(mainFileName).getAbsoluteFile().getParent() + "\n"
+ "cp -f " + updateFileName + " " + mainFileName + "\n"
+ "rm -f " + updateFileName + "\n"
+ "java -jar \"" + new File(mainFileName).getAbsolutePath() + "\" &\n"
+ "sleep 1" + "\n"
+ "rm -f " + batchPath + "\n";
batchExec = new String[]{"sh", batchPath};
}

// Create updater script
try (BufferedWriter bw = new BufferedWriter(new FileWriter(batchFile))) {
bw.write(script);
bw.flush();
}

logger.info("Saved update script to " + batchFile);
// Run updater script on exit
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
try {
logger.info("Executing: " + batchFile);
Runtime.getRuntime().exec(batchExec);
} catch (IOException e) {
// TODO: implement proper error handling; this is intended as a placeholder
e.printStackTrace();
}
}));
logger.info("Exiting older version, should execute update script (" + batchFile + ") during exit");
System.exit(0);
} else {
new File(mainFileName).delete();
new File(updateFileName).renameTo(new File(mainFileName));
}
}

}
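A side note on the shouldLaunch == false branch above: File.delete() and File.renameTo() return booleans that the patch discards, so a failed swap goes unnoticed. A hedged alternative sketch (java.nio.file is a suggested substitution, not what the commit does):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    class ReplaceJarExample {
        // Replaces the old jar with the update in one call; REPLACE_EXISTING
        // covers the delete step, and move() throws instead of returning false
        static void swapInUpdate(String updateFileName, String mainFileName) throws IOException {
            Files.move(Paths.get(updateFileName), Paths.get(mainFileName),
                    StandardCopyOption.REPLACE_EXISTING);
        }
    }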

@@ -3,9 +3,7 @@ package com.rarchives.ripme.utils;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@@ -88,6 +86,9 @@ public class RipUtils {
logger.warn("Exception while retrieving eroshare page:", e);
}
return result;
} else if (url.toExternalForm().contains("v.redd.it")) {
result.add(url);
return result;
}

else if (url.toExternalForm().contains("erome.com")) {
@@ -279,4 +280,16 @@ public class RipUtils {
}
return url;
}
/**
 * Reads a cookie string (Key1=value1;key2=value2) from the config file and turns it into a HashMap
 * @return Map of cookies containing session data.
 */
public static Map<String, String> getCookiesFromString(String line) {
Map<String,String> cookies = new HashMap<>();
for (String pair : line.split(";")) {
String[] kv = pair.split("=");
cookies.put(kv[0], kv[1]);
}
return cookies;
}
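A quick usage sketch of the new helper (assuming RipUtils is on the classpath), with its main edge case: a pair that lacks "=" makes kv[1] throw ArrayIndexOutOfBoundsException, so the config value must be well formed:

    import java.util.Map;

    class CookieStringExample {
        public static void main(String[] args) {
            Map<String, String> cookies =
                    RipUtils.getCookiesFromString("session=abc123;csrftoken=xyz");
            System.out.println(cookies); // {session=abc123, csrftoken=xyz} (map order not guaranteed)
        }
    }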
}

@@ -1,87 +1,102 @@
package com.rarchives.ripme.utils;

import java.io.*;
import java.lang.reflect.Constructor;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.*;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.Line;
import javax.sound.sampled.LineEvent;

import com.rarchives.ripme.ripper.AbstractRipper;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;

import com.rarchives.ripme.ripper.AbstractRipper;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.Line;
import javax.sound.sampled.LineEvent;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.Constructor;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.MissingResourceException;
import java.util.ResourceBundle;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

/**
 * Common utility functions used in various places throughout the project.
 */
public class Utils {

private static final String RIP_DIRECTORY = "rips";
private static final String configFile = "rip.properties";
private static final String CONFIG_FILE = "rip.properties";
private static final String OS = System.getProperty("os.name").toLowerCase();
private static final Logger logger = Logger.getLogger(Utils.class);
private static final Logger LOGGER = Logger.getLogger(Utils.class);
private static final int SHORTENED_PATH_LENGTH = 12;

private static PropertiesConfiguration config;
private static HashMap<String, HashMap<String, String>> cookieCache;

static {
cookieCache = new HashMap<>();

try {
String configPath = getConfigFilePath();
File f = new File(configPath);
if (!f.exists()) {
File file = new File(configPath);

if (!file.exists()) {
// Use default bundled with .jar
configPath = configFile;
configPath = CONFIG_FILE;
}

config = new PropertiesConfiguration(configPath);
logger.info("Loaded " + config.getPath());
if (f.exists()) {
LOGGER.info("Loaded " + config.getPath());

if (file.exists()) {
// Config was loaded from file
if ( !config.containsKey("twitter.auth")
|| !config.containsKey("twitter.max_requests")
|| !config.containsKey("tumblr.auth")
|| !config.containsKey("error.skip404")
|| !config.containsKey("gw.api")
|| !config.containsKey("page.timeout")
|| !config.containsKey("download.max_size")
) {
if (!config.containsKey("twitter.auth") || !config.containsKey("twitter.max_requests")
|| !config.containsKey("tumblr.auth") || !config.containsKey("error.skip404")
|| !config.containsKey("gw.api") || !config.containsKey("page.timeout")
|| !config.containsKey("download.max_size")) {
// Config is missing key fields
// Need to reload the default config
// See https://github.com/4pr0n/ripme/issues/158
logger.warn("Config does not contain key fields, deleting old config");
f.delete();
config = new PropertiesConfiguration(configFile);
logger.info("Loaded " + config.getPath());
LOGGER.warn("Config does not contain key fields, deleting old config");
file.delete();
config = new PropertiesConfiguration(CONFIG_FILE);
LOGGER.info("Loaded " + config.getPath());
}
}
} catch (Exception e) {
logger.error("[!] Failed to load properties file from " + configFile, e);
LOGGER.error("[!] Failed to load properties file from " + CONFIG_FILE, e);
}
}

/**
 * Get the root rips directory.
 * @return
 * Root directory to save rips to.
 * @throws IOException
 *
 * @return Root directory to save rips to.
 */
public static File getWorkingDirectory() {
String currentDir = ".";
try {
currentDir = new File(".").getCanonicalPath() + File.separator + RIP_DIRECTORY + File.separator;
} catch (IOException e) {
logger.error("Error while finding working dir: ", e);
LOGGER.error("Error while finding working dir: ", e);
}

if (config != null) {
currentDir = getConfigString("rips.directory", currentDir);
}

File workingDir = new File(currentDir);
if (!workingDir.exists()) {
workingDir.mkdirs();
@@ -92,7 +107,7 @@ public class Utils {
/**
 * Gets the value of a specific config key.
 *
 * @param key The name of the config parameter you want to find.
 * @param key The name of the config parameter you want to find.
 * @param defaultValue What the default value would be.
 */
public static String getConfigString(String key, String defaultValue) {
@@ -100,36 +115,46 @@ public class Utils {
}

public static String[] getConfigStringArray(String key) {
String[] s = config.getStringArray(key);
if (s.length == 0) {
return null;
} else {
return s;
}
String[] configStringArray = config.getStringArray(key);

return configStringArray.length == 0 ? null : configStringArray;
}

public static int getConfigInteger(String key, int defaultValue) {
return config.getInt(key, defaultValue);
}

public static boolean getConfigBoolean(String key, boolean defaultValue) {
return config.getBoolean(key, defaultValue);
}

public static List<String> getConfigList(String key) {
List<String> result = new ArrayList<>();
for (Object obj : config.getList(key, new ArrayList<String>())) {
if (obj instanceof String) {
result.add( (String) obj);
result.add((String) obj);
}
}
return result;
}
public static void setConfigBoolean(String key, boolean value) { config.setProperty(key, value); }
public static void setConfigString(String key, String value) { config.setProperty(key, value); }
public static void setConfigInteger(String key, int value) { config.setProperty(key, value); }

public static void setConfigBoolean(String key, boolean value) {
config.setProperty(key, value);
}

public static void setConfigString(String key, String value) {
config.setProperty(key, value);
}

public static void setConfigInteger(String key, int value) {
config.setProperty(key, value);
}

public static void setConfigList(String key, List<Object> list) {
config.clearProperty(key);
config.addProperty(key, list);
}

public static void setConfigList(String key, Enumeration<Object> enumeration) {
config.clearProperty(key);
List<Object> list = new ArrayList<>();
@@ -142,9 +167,9 @@ public class Utils {
public static void saveConfig() {
try {
config.save(getConfigFilePath());
logger.info("Saved configuration to " + getConfigFilePath());
LOGGER.info("Saved configuration to " + getConfigFilePath());
} catch (ConfigurationException e) {
logger.error("Error while saving configuration: ", e);
LOGGER.error("Error while saving configuration: ", e);
}
}
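A small usage sketch of the config API above (the key names here are illustrative, not taken from the patch; it assumes the snippet runs alongside the Utils class):

    class ConfigUsageExample {
        public static void main(String[] args) {
            // Read with a default, change a value, then persist to rip.properties
            int threads = Utils.getConfigInteger("threads.size", 5);
            Utils.setConfigInteger("threads.size", threads + 1);
            Utils.setConfigBoolean("download.save_order", true);
            Utils.saveConfig();
        }
    }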

@@ -176,7 +201,6 @@ public class Utils {
return System.getenv("LOCALAPPDATA") + File.separator + "ripme";
}


/**
 * Gets the directory of where the config file is stored on a UNIX machine.
 */
@@ -197,13 +221,14 @@ public class Utils {
 */
private static boolean portableMode() {
try {
File f = new File(new File(".").getCanonicalPath() + File.separator + configFile);
if(f.exists() && !f.isDirectory()) {
File file = new File(new File(".").getCanonicalPath() + File.separator + CONFIG_FILE);
if (file.exists() && !file.isDirectory()) {
return true;
}
} catch (IOException e) {
return false;
}

return false;
}

@@ -229,6 +254,7 @@ public class Utils {
return ".";
}
}

/**
 * Delete the url history file
 */
@@ -238,7 +264,7 @@ public class Utils {
}

/**
 * Return the path of the url history file
 * Return the path of the url history file
 */
public static String getURLHistoryFile() {
return getConfigDir() + File.separator + "url_history.txt";
@@ -248,26 +274,23 @@ public class Utils {
 * Gets the path to the configuration file.
 */
private static String getConfigFilePath() {
return getConfigDir() + File.separator + configFile;
return getConfigDir() + File.separator + CONFIG_FILE;
}

/**
 * Removes the current working directory (CWD) from a File.
 * @param saveAs
 * The File path
 * @return
 * saveAs in relation to the CWD
 *
 * @param saveAs The File path
 * @return saveAs in relation to the CWD
 */
public static String removeCWD(File saveAs) {
String prettySaveAs = saveAs.toString();
try {
prettySaveAs = saveAs.getCanonicalPath();
String cwd = new File(".").getCanonicalPath() + File.separator;
prettySaveAs = prettySaveAs.replace(
cwd,
"." + File.separator);
prettySaveAs = prettySaveAs.replace(cwd, "." + File.separator);
} catch (Exception e) {
logger.error("Exception: ", e);
LOGGER.error("Exception: ", e);
}
return prettySaveAs;
}
@@ -276,9 +299,8 @@ public class Utils {
 * Strips away URL parameters, which usually appear at the end of URLs.
 * E.g. the ?query on PHP
 *
 * @param url The URL to filter/strip
 * @param url The URL to filter/strip
 * @param parameter The parameter to strip
 *
 * @return The stripped URL
 */
public static String stripURLParameter(String url, String parameter) {
@@ -290,13 +312,13 @@ public class Utils {
}

if (paramIndex > 0) {
int nextParam = url.indexOf("&", paramIndex+1);
int nextParam = url.indexOf('&', paramIndex + 1);
if (nextParam != -1) {
String c = "&";
if (wasFirstParam) {
c = "?";
}
url = url.substring(0, paramIndex) + c + url.substring(nextParam+1, url.length());
url = url.substring(0, paramIndex) + c + url.substring(nextParam + 1, url.length());
} else {
url = url.substring(0, paramIndex);
}
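The top of stripURLParameter is outside this hunk, so the following usage sketch is inferred from the visible logic rather than verified against the full method:

    class StripParamExample {
        public static void main(String[] args) {
            // Expected: the named parameter is removed and the separators are
            // re-joined, keeping "?" before what is now the first parameter
            String url = "http://example.com/album?size=large&page=2";
            System.out.println(Utils.stripURLParameter(url, "size"));
            // http://example.com/album?page=2
        }
    }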
@@ -307,10 +329,9 @@ public class Utils {

/**
 * Removes the current working directory from a given filename
 * @param file
 * Path to the file
 * @return
 * 'file' without the leading current working directory
 *
 * @param file Path to the file
 * @return 'file' without the leading current working directory
 */
public static String removeCWD(String file) {
return removeCWD(new File(file));
@@ -320,12 +341,11 @@ public class Utils {
 * Get a list of all Classes within a package.
 * Works with file system projects and jar files!
 * Borrowed from StackOverflow, but I don't have a link :[
 * @param pkgname
 * The name of the package
 * @return
 * List of classes within the package
 *
 * @param pkgname The name of the package
 * @return List of classes within the package
 */
public static ArrayList<Class<?>> getClassesForPackage(String pkgname) {
public static List<Class<?>> getClassesForPackage(String pkgname) {
ArrayList<Class<?>> classes = new ArrayList<>();
String relPath = pkgname.replace('.', '/');
URL resource = ClassLoader.getSystemClassLoader().getResource(relPath);
@@ -334,7 +354,8 @@ public class Utils {
}

String fullPath = resource.getFile();
File directory = null;
File directory;

try {
directory = new File(resource.toURI());
} catch (URISyntaxException e) {
@@ -356,8 +377,7 @@ public class Utils {
}
}
}
}
else {
} else {
// Load from JAR
try {
String jarPath = fullPath
@@ -376,7 +396,7 @@ public class Utils {
try {
classes.add(Class.forName(className));
} catch (ClassNotFoundException e) {
logger.error("ClassNotFoundException loading " + className);
LOGGER.error("ClassNotFoundException loading " + className);
jarFile.close(); // Resource leak fix?
throw new RuntimeException("ClassNotFoundException loading " + className);
}
@@ -384,20 +404,18 @@ public class Utils {
}
jarFile.close(); // Eclipse said not closing it would have a resource leak
} catch (IOException e) {
logger.error("Error while loading jar file:", e);
LOGGER.error("Error while loading jar file:", e);
throw new RuntimeException(pkgname + " (" + directory + ") does not appear to be a valid package", e);
}
}
return classes;
}

private static final int SHORTENED_PATH_LENGTH = 12;
/**
 * Shortens the path to a file
 * @param path
 * String of the path to the file
 * @return
 * The simplified path to the file.
 *
 * @param path String of the path to the file
 * @return The simplified path to the file.
 */
public static String shortenPath(String path) {
return shortenPath(new File(path));
@@ -405,10 +423,9 @@ public class Utils {

/**
 * Shortens the path to a file
 * @param file
 * File object that you want the shortened path of.
 * @return
 * The simplified path to the file.
 *
 * @param file File object that you want the shortened path of.
 * @return The simplified path to the file.
 */
public static String shortenPath(File file) {
String path = removeCWD(file);
@@ -422,10 +439,9 @@ public class Utils {

/**
 * Sanitizes a string so that a filesystem can handle it
 * @param text
 * The text to be sanitized.
 * @return
 * The sanitized text.
 *
 * @param text The text to be sanitized.
 * @return The sanitized text.
 */
public static String filesystemSanitized(String text) {
text = text.replaceAll("[^a-zA-Z0-9.-]", "_");
@@ -434,8 +450,8 @@ public class Utils {

public static String filesystemSafe(String text) {
text = text.replaceAll("[^a-zA-Z0-9.-]", "_")
.replaceAll("__", "_")
.replaceAll("_+$", "");
.replaceAll("__", "_")
.replaceAll("_+$", "");
if (text.length() > 100) {
text = text.substring(0, 99);
}
@@ -451,7 +467,7 @@ public class Utils {
public static String getOriginalDirectory(String path) {

int index;
if(isUnix() || isMacOS()) {
if (isUnix() || isMacOS()) {
index = path.lastIndexOf('/');
} else {
// current OS is windows - nothing to do here
@@ -459,17 +475,17 @@ public class Utils {
}

String original = path; // needs to be checked if lowercase exists
String lastPart = original.substring(index+1).toLowerCase(); // setting lowercase to check if it exists
String lastPart = original.substring(index + 1).toLowerCase(); // setting lowercase to check if it exists

// Get a List of all Directories and check its lowercase
// if file exists return it
File f = new File(path.substring(0, index));
ArrayList<String> names = new ArrayList<String>(Arrays.asList(f.list()));
File file = new File(path.substring(0, index));
ArrayList<String> names = new ArrayList<>(Arrays.asList(file.list()));

for (String s : names) {
if(s.toLowerCase().equals(lastPart)) {
for (String name : names) {
if (name.toLowerCase().equals(lastPart)) {
// Building Path of existing file
return path.substring(0, index) + File.separator + s;
return path.substring(0, index) + File.separator + name;
}
}

@@ -478,14 +494,13 @@ public class Utils {

/**
 * Converts an integer into a human readable string
 * @param bytes
 * Non-human readable integer.
 * @return
 * Human readable interpretation of a byte.
 *
 * @param bytes Non-human readable integer.
 * @return Human readable interpretation of a byte.
 */
public static String bytesToHumanReadable(int bytes) {
float fbytes = (float) bytes;
String[] mags = new String[] {"", "K", "M", "G", "T"};
String[] mags = new String[]{"", "K", "M", "G", "T"};
int magIndex = 0;
while (fbytes >= 1024) {
fbytes /= 1024;
@@ -496,6 +511,7 @@ public class Utils {

/**
 * Gets and returns a list of all the album rippers present in the "com.rarchives.ripme.ripper.rippers" package.
 *
 * @return List<String> of all album rippers present.
 */
public static List<String> getListOfAlbumRippers() throws Exception {
@@ -508,6 +524,7 @@ public class Utils {

/**
 * Gets and returns a list of all video rippers present in the "com.rarchives.ripme.ripper.rippers.video" package
 *
 * @return List<String> of all the video rippers.
 */
public static List<String> getListOfVideoRippers() throws Exception {
@@ -520,8 +537,8 @@ public class Utils {

/**
 * Plays a sound from a file.
 * @param filename
 * Path to the sound file
 *
 * @param filename Path to the sound file
 */
public static void playSound(String filename) {
URL resource = ClassLoader.getSystemClassLoader().getResource(filename);
@@ -535,7 +552,7 @@ public class Utils {
clip.open(AudioSystem.getAudioInputStream(resource));
clip.start();
} catch (Exception e) {
logger.error("Failed to play sound " + filename, e);
LOGGER.error("Failed to play sound " + filename, e);
}
}

@@ -544,57 +561,55 @@ public class Utils {
 */
public static void configureLogger() {
LogManager.shutdown();
String logFile;
if (getConfigBoolean("log.save", false)) {
logFile = "log4j.file.properties";
String logFile = getConfigBoolean("log.save", false) ? "log4j.file.properties" : "log4j.properties";

try (InputStream stream = Utils.class.getClassLoader().getResourceAsStream(logFile)) {
if (stream == null) {
PropertyConfigurator.configure("src/main/resources/" + logFile);
} else {
PropertyConfigurator.configure(stream);
}

LOGGER.info("Loaded " + logFile);
} catch (IOException e) {
LOGGER.error(e.getMessage(), e);
}
else {
logFile = "log4j.properties";
}
InputStream stream = Utils.class.getClassLoader().getResourceAsStream(logFile);
if (stream == null) {
PropertyConfigurator.configure("src/main/resources/" + logFile);
} else {
PropertyConfigurator.configure(stream);
}
logger.info("Loaded " + logFile);
try {
stream.close();
} catch (IOException e) { }

}

/**
 * Gets list of strings between two strings.
 *
 * @param fullText Text to retrieve from.
 * @param start String that precedes the desired text
 * @param finish String that follows the desired text
 * @param start String that precedes the desired text
 * @param finish String that follows the desired text
 * @return List of all strings that are between 'start' and 'finish'
 */
public static List<String> between(String fullText, String start, String finish) {
List<String> result = new ArrayList<>();
int i, j;
i = fullText.indexOf(start);
int i = fullText.indexOf(start);

while (i >= 0) {
i += start.length();
j = fullText.indexOf(finish, i);
int j = fullText.indexOf(finish, i);
if (j < 0) {
break;
}
result.add(fullText.substring(i, j));
i = fullText.indexOf(start, j + finish.length());
}

return result;
}
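A usage sketch for between(), traced against the loop above:

    class BetweenExample {
        public static void main(String[] args) {
            // Collects every substring bracketed by the start and finish markers
            System.out.println(Utils.between("<a><b>", "<", ">")); // [a, b]
        }
    }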
/**
 * Parses a URL query
 *
 * @param query
 * The query part of a URL
 * @param query The query part of a URL
 * @return The map of all query parameters
 */
public static Map<String,String> parseUrlQuery(String query) {
Map<String,String> res = new HashMap<>();
public static Map<String, String> parseUrlQuery(String query) {
Map<String, String> res = new HashMap<>();

if (query.equals("")) {
return res;
@@ -622,10 +637,8 @@ public class Utils {
/**
 * Parses a URL query and returns the requested parameter's value
 *
 * @param query
 * The query part of a URL
 * @param key
 * The key whose value is requested
 * @param query The query part of a URL
 * @param key The key whose value is requested
 * @return The associated value or null if key wasn't found
 */
public static String parseUrlQuery(String query, String key) {
@@ -655,18 +668,13 @@ public class Utils {
return null;
}
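Both parseUrlQuery bodies are largely elided by the hunks, so this usage sketch is inferred from the signatures and javadoc only:

    class ParseQueryExample {
        public static void main(String[] args) {
            System.out.println(Utils.parseUrlQuery("a=1&b=2"));      // {a=1, b=2} (order not guaranteed)
            System.out.println(Utils.parseUrlQuery("a=1&b=2", "b")); // 2
            System.out.println(Utils.parseUrlQuery("a=1&b=2", "c")); // null
        }
    }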

private static HashMap<String, HashMap<String, String>> cookieCache;
static {
cookieCache = new HashMap<String, HashMap<String, String>>();
}

/**
 * Gets all the cookies from a certain host
 */
public static Map<String, String> getCookies(String host) {
HashMap<String, String> domainCookies = cookieCache.get(host);
if (domainCookies == null) {
domainCookies = new HashMap<String, String>();
domainCookies = new HashMap<>();
String cookiesConfig = getConfigString("cookies." + host, "");
for (String pair : cookiesConfig.split(" ")) {
pair = pair.trim();
@@ -686,18 +694,25 @@ public class Utils {
 *
 * @return Returns the default resource bundle using the language specified in the config file.
 */
public static ResourceBundle getResourceBundle() {
if (!getConfigString("lang", "").equals("")) {
String[] langCode = getConfigString("lang", "").split("_");
logger.info("Setting locale to " + getConfigString("lang", ""));
public static ResourceBundle getResourceBundle(String langSelect) {
if (langSelect == null) {
if (!getConfigString("lang", "").equals("")) {
String[] langCode = getConfigString("lang", "").split("_");
LOGGER.info("Setting locale to " + getConfigString("lang", ""));
return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control());
}
} else {
String[] langCode = langSelect.split("_");
LOGGER.info("Setting locale to " + langSelect);
return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control());
}
try {
ResourceBundle rb = ResourceBundle.getBundle("LabelsBundle", Locale.getDefault(), new UTF8Control());
return rb;
LOGGER.info("Setting locale to default");
return ResourceBundle.getBundle("LabelsBundle", Locale.getDefault(), new UTF8Control());
} catch (MissingResourceException e) {
ResourceBundle rb = ResourceBundle.getBundle("LabelsBundle", Locale.ROOT);
return rb;
LOGGER.info("Setting locale to root");
return ResourceBundle.getBundle("LabelsBundle", Locale.ROOT);
}
}

}
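A usage sketch tying the new getResourceBundle(String) overload to the bundles added below (behavior inferred from the visible branches):

    class LocaleExample {
        public static void main(String[] args) {
            // Explicit tag: loads LabelsBundle_fi_FI.properties
            System.out.println(Utils.getResourceBundle("fi_FI").getString("Queue")); // Jono
            // null falls back to the "lang" config key, then the default locale
            System.out.println(Utils.getResourceBundle(null).getString("Queue"));
        }
    }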
@@ -10,7 +10,7 @@ Configuration = Configuration
current.version = Current version
check.for.updates = Check for updates
auto.update = Auto-update?
max.download.threads = Maximum download threads
max.download.threads = Maximum download threads:
timeout.mill = Timeout (in milliseconds):
retry.download.count = Retry download count
overwrite.existing.files = Overwrite existing files?

37 src/main/resources/LabelsBundle_en_US.properties Normal file
@@ -0,0 +1,37 @@
Log = Log
History = History
created = created
modified = modified
Queue = Queue
Configuration = Configuration

# Keys for the Configuration menu

current.version = Current version
check.for.updates = Check for updates
auto.update = Auto-update?
max.download.threads = Maximum download threads:
timeout.mill = Timeout (in milliseconds):
retry.download.count = Retry download count
overwrite.existing.files = Overwrite existing files?
sound.when.rip.completes = Sound when rip completes
preserve.order = Preserve order
save.logs = Save logs
notification.when.rip.starts = Notification when rip starts
save.urls.only = Save URLs only
save.album.titles = Save album titles
autorip.from.clipboard = Autorip from Clipboard
save.descriptions = Save descriptions
prefer.mp4.over.gif = Prefer MP4 over GIF
restore.window.position = Restore window position
remember.url.history = Remember URL history
loading.history.from = Loading history from

# Misc UI keys

loading.history.from.configuration = Loading history from configuration
interrupted.while.waiting.to.rip.next.album = Interrupted while waiting to rip next album
inactive = Inactive
re-rip.checked = Re-rip Checked
remove = Remove
clear = Clear

37 src/main/resources/LabelsBundle_fi_FI.properties Normal file
@@ -0,0 +1,37 @@
Log = Logi
History = Historia
created = luotu
modified = muokattu
Queue = Jono
Configuration = Asetukset

# Keys for the Configuration menu

current.version = Nykyinen versio
check.for.updates = Tarkista päivitykset
auto.update = Automaattipäivitys?
max.download.threads = Yhtäaikaiset lataukset
timeout.mill = Aikakatkaisu (millisekunneissa):
retry.download.count = Latauksen uudelleenyritykset
overwrite.existing.files = Korvaa nykyiset tiedostot?
sound.when.rip.completes = Valmistumisääni
preserve.order = Pidä järjestys
save.logs = Tallenna logit
notification.when.rip.starts = Valmistumisilmoitus
save.urls.only = Tallenna vain osoitteet
save.album.titles = Tallenna albumiotsikot
autorip.from.clipboard = Ota linkit leikepöydältä
save.descriptions = Tallenna kuvaukset
prefer.mp4.over.gif = Suosi MP4:jää GIF:fin sijasta
restore.window.position = Palauta ikkunan sijainti
remember.url.history = Muista osoitehistoria
loading.history.from = Ladataan historiaa kohteesta

# Misc UI keys

loading.history.from.configuration = Ladataan historiaa asetustiedostosta
interrupted.while.waiting.to.rip.next.album = Keskeytetty odottaessa seuraavaa albumia
inactive = Toimeton
re-rip.checked = Uudelleenlataa merkatut
remove = Poista
clear = Tyhjennä

37 src/main/resources/LabelsBundle_in_ID.properties Normal file
@@ -0,0 +1,37 @@
Log = Log
History = Riwayat
created = dibuat pada
modified = diubah pada
Queue = Antrian
Configuration = Pengaturan

# Keys for the Configuration menu

current.version = Versi terbaru
check.for.updates = Periksa update
auto.update = Update otomatis?
max.download.threads = Thread unduh maksimal
timeout.mill = Batas waktu (dalam milidetik):
retry.download.count = Jumlah percobaan unduh
overwrite.existing.files = Timpa file yang ada?
sound.when.rip.completes = Hidupkan suara saat rip selesai
preserve.order = Pertahankan urutan
save.logs = Simpan log
notification.when.rip.starts = Pemberitahuan saat rip dimulai
save.urls.only = Simpan URL saja
save.album.titles = Simpan judul album
autorip.from.clipboard = Rip otomatis dari clipboard
save.descriptions = Simpan deskripsi
prefer.mp4.over.gif = Utamakan MP4 dari GIF
restore.window.position = Kembalikan ukuran Window
remember.url.history = Ingat riwayat URL
loading.history.from = Ambil riwayat dari

# Misc UI keys

loading.history.from.configuration = Ambil riwayat dari pengaturan
interrupted.while.waiting.to.rip.next.album = Terputus saat menunggu rip album selanjutnya
inactive = Tidak aktif
re-rip.checked = Rip Ulang
remove = Hapus
clear = Hapus Semua

37 src/main/resources/LabelsBundle_it_IT.properties Normal file
@@ -0,0 +1,37 @@
Log = Log
History = Cronologia
created = creato
modified = modificato
Queue = Coda
Configuration = Configurazione

# Keys for the Configuration menu

current.version = Versione Corrente
check.for.updates = Controlla Aggiornamenti
auto.update = Aggiornamento automatico?
max.download.threads = Thread massimi
timeout.mill = Timeout (in millisecondi):
retry.download.count = Tentativi di download
overwrite.existing.files = Sovrascrivi file esistenti?
sound.when.rip.completes = Suono al termine
preserve.order = Conserva ordine
save.logs = Salva log
notification.when.rip.starts = Notifica inizio
save.urls.only = Salva solo URL
save.album.titles = Salva titoli album
autorip.from.clipboard = Scarica da appunti
save.descriptions = Salva descrizioni
prefer.mp4.over.gif = Preferisci MP4 a GIF
restore.window.position = Ripristina posizione della finestra
remember.url.history = Ricorda la cronologia degli URL
loading.history.from = Carica cronologia da

# Misc UI keys

loading.history.from.configuration = Caricamento cronologia da configurazione
interrupted.while.waiting.to.rip.next.album = Interrotto mentre scaricavo album successivo
inactive = Inattivo
re-rip.checked = Re-rip selezionato
remove = Rimuovi
clear = Pulisci

37 src/main/resources/LabelsBundle_kr_KR.properties Normal file
@@ -0,0 +1,37 @@
Log = \uB85C\uADF8
History = \uD788\uC2A4\uD1A0\uB9AC
created = \uC0DD\uC0B0\uB428
modified = \uC218\uC815\uB428
Queue = \uB300\uAE30\uC5F4
Configuration = \uAD6C\uC131

# Keys for the Configuration menu

current.version = \uD604\uC7AC \uBC84\uC804
check.for.updates = \uC5C5\uB370\uC774\uD2B8 \uD655\uC778
auto.update = \uC790\uB3D9 \uC5C5\uB370\uC774\uD2B8
max.download.threads = \uCD5C\uB300 \uB2E4\uC6B4\uB85C\uB4DC \uC4F0\uB808\uB4DC \uC218
timeout.mill = \uC2DC\uAC04 \uC81C\uD55C (\uBC00\uB9AC\uCD08):
retry.download.count = \uB2E4\uC6B4\uB85C\uB4DC \uC7AC\uC2DC\uB3C4 \uD68C\uC218
overwrite.existing.files = \uC911\uBCF5\uD30C\uC77C \uB36E\uC5B4\uC4F0\uAE30
sound.when.rip.completes = \uC644\uB8CC\uC2DC \uC54C\uB9BC
preserve.order = \uBA85\uB839 \uAE30\uC5B5\uD558\uAE30
save.logs = \uB85C\uADF8 \uC800\uC7A5
notification.when.rip.starts = \uC2DC\uC791\uC2DC \uC54C\uB9BC
save.urls.only = URL\uB9CC \uC800\uC7A5\uD558\uAE30
save.album.titles = \uC568\uBC94 \uC81C\uBAA9 \uC800\uC7A5
autorip.from.clipboard = \uD074\uB9BD\uBCF4\uB4DC\uC5D0\uC11C \uC790\uB3D9\uC73C\uB85C \uAC00\uC838\uC624\uAE30
save.descriptions = \uC568\uBC94 \uC124\uBA85 \uC800\uC7A5
prefer.mp4.over.gif = GIF\uBCF4\uB2E4 MP4 \uC120\uD638
restore.window.position = \uCC3D \uC704\uCE58 \uBCF5\uC6D0
remember.url.history = URL \uD788\uC2A4\uD1A0\uB9AC \uAE30\uC5B5\uD558\uAE30
loading.history.from = \uD788\uC2A4\uD1A0\uB9AC \uAC00\uC838\uC624\uAE30

# Misc UI keys

loading.history.from.configuration = \uAD6C\uC131\uC5D0\uC11C \uD788\uC2A4\uD1A0\uB9AC \uBD88\uB7EC\uC624\uAE30
interrupted.while.waiting.to.rip.next.album = \uB2E4\uC74C \uC568\uBC94 \uBCF5\uC0AC\uB97C \uAE30\uB2E4\uB9AC\uB294\uB3D9\uC548 \uC911\uB2E8\uB428
inactive = \uBE44\uD65C\uC131\uD654
re-rip.checked = \uB2E4\uC2DC \uBCF5\uC0AC\uD558\uAE30 \uCCB4\uD06C\uB428
remove = \uC120\uD0DD\uD55C \uAE30\uB85D \uC0AD\uC81C
clear = \uD788\uC2A4\uD1A0\uB9AC \uBAA8\uB450 \uC0AD\uC81C

37 src/main/resources/LabelsBundle_porrisavvo_FI.properties Normal file
@@ -0,0 +1,37 @@
Log = Loki
History = Historriijja
created = luatu
modified = muakat
Queue = Jono
Configuration = Assetuksse

# Keys for the Configuration menu

current.version = Nykyne versijjo
check.for.updates = Tarkist update
auto.update = Automaatpäivvitys?
max.download.threads = Yht'aikasse ripi
timeout.mill = Timeout (millisekois):
retry.download.count = Ripi retry count
overwrite.existing.files = Korvvaa nykysse filu?
sound.when.rip.completes = Valmistummis'ään
preserve.order = Pir järestys
save.logs = Tallen loki
notification.when.rip.starts = Valmistummisilmotus
save.urls.only = Tallen vaa ossottee
save.album.titles = Tallen album'otsiko
autorip.from.clipboard = Ot linki leikpöyrrält
save.descriptions = Tallen kuvvauksse
prefer.mp4.over.gif = Suasi MP4 GIF sijjaa
restore.window.position = Palaut ikkunna sijaant
remember.url.history = Muist osot'hissa
loading.history.from = Larrataa hissaa lähteest

# Misc UI keys

loading.history.from.configuration = Larrataa hissaa asetusfilust
interrupted.while.waiting.to.rip.next.album = Keskeytet venates seurraavvaa album
inactive = Idle
re-rip.checked = Re-rip merkatu
remove = Poist
clear = Tyhjen

Some files were not shown because too many files have changed in this diff.