commit a56c2f8ba6

18  patch.py

@@ -8,8 +8,8 @@ from hashlib import sha256
# - update version in a few places
# - insert new line in ripme.json with message
# - build ripme
# - add the hash of the lastest binary to ripme.json
# - add the hash of the latest binary to ripme.json
# - commit all changes
message = input('message: ')

@@ -30,8 +30,7 @@ def update_hash(current_hash):
def update_change_list(message):
    ripmeJson = get_ripme_json()
    with open('ripme.json', 'w') as dataFile:
        ripmeJson["changeList"] = ripmeJson["changeList"].insert(0, message)
        print(ripmeJson["currentHash"])
        ripmeJson["changeList"].insert(0, message)
        json.dump(ripmeJson, dataFile, indent=4)

@@ -63,17 +62,9 @@ subprocess.call(['sed', '-i', '-e', substrExpr, 'pom.xml'])
subprocess.call(['git', 'grep', '<version>' + nextVersion + '</version>', 'pom.xml'])

commitMessage = nextVersion + ': ' + message
changeLogLine = ' \"' + commitMessage + '\",\n'

dataFile = open("ripme.json", "r")
ripmeJsonLines = dataFile.readlines()
ripmeJsonLines.insert(3, changeLogLine)
outputContent = ''.join(ripmeJsonLines)
dataFile.close()
update_change_list(commitMessage)

dataFile = open("ripme.json", "w")
dataFile.write(outputContent)
dataFile.close()

print("Building ripme")
subprocess.call(["mvn", "clean", "compile", "assembly:single"])

@@ -89,3 +80,4 @@ update_hash(file_hash)
subprocess.call(['git', 'add', '-u'])
subprocess.call(['git', 'commit', '-m', commitMessage])
subprocess.call(['git', 'tag', nextVersion])
print("Remember to run `git push origin master` before release.py")
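The update_change_list rewrite fixes a real bug: Python's list.insert mutates the list in place and returns None, so the removed assignment replaced changeList with None before it was serialized. The new code inserts the message and then dumps the whole object. For reference, a minimal sketch of the same prepend-and-save step written against the org.json API the Java side of the project already uses (file name and message are illustrative, not part of this commit):

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.json.JSONArray;
import org.json.JSONObject;

public class ChangeListUpdateSketch {
    public static void main(String[] args) throws Exception {
        Path file = Paths.get("ripme.json");
        JSONObject ripmeJson = new JSONObject(new String(Files.readAllBytes(file), StandardCharsets.UTF_8));

        // JSONArray has no insert(), so rebuild the array with the new entry first.
        JSONArray updated = new JSONArray().put("1.7.60: example message");
        JSONArray old = ripmeJson.getJSONArray("changeList");
        for (int i = 0; i < old.length(); i++) {
            updated.put(old.get(i));
        }
        ripmeJson.put("changeList", updated);

        // Indent by 4, like json.dump(..., indent=4) in patch.py.
        Files.write(file, ripmeJson.toString(4).getBytes(StandardCharsets.UTF_8));
    }
}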
2  pom.xml

@@ -4,7 +4,7 @@
    <groupId>com.rarchives.ripme</groupId>
    <artifactId>ripme</artifactId>
    <packaging>jar</packaging>
    <version>1.7.51</version>
    <version>1.7.60</version>
    <name>ripme</name>
    <url>http://rip.rarchives.com</url>
    <properties>
61  release.py  (Normal file → Executable file)

@@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/env python3

import re

@@ -15,9 +15,12 @@ parser.add_argument("-f", "--file", help="Path to the version of ripme to releas
parser.add_argument("-t", "--token", help="Your github personal access token")
parser.add_argument("-d", "--debug", help="Run in debug mode", action="store_true")
parser.add_argument("-n", "--non-interactive", help="Do not ask for any input from the user", action="store_true")
parser.add_argument("--test", help="Perform a dry run (Do everything but upload new release)", action="store_true")
parser.add_argument("--skip-hash-check", help="Skip hash check (This should only be used for testing)", action="store_true")
args = parser.parse_args()

try:
    # This binds input to raw_input on python2, we do this because input acts like eval on python2
    input = raw_input
except NameError:
    pass

@@ -34,13 +37,24 @@ def isJar(filename):
# false if not
def isValidCommitMessage(message):
    if debug:
        print("Checking if {} matchs pattern ^\d+\.\d+\.\d+:".format(message))
    pattern = re.compile("^\d+\.\d+\.\d+:")
        print(r"Checking if {} matches pattern ^\d+\.\d+\.\d+:".format(message))
    pattern = re.compile(r"^\d+\.\d+\.\d+:")
    return re.match(pattern, message)


# Checks if the update has the name ripme.jar, if not it renames the file
def checkAndRenameFile(path):
    """Check if path (a string) points to a ripme.jar. Returns the possibly renamed file path"""
    if not path.endswith("ripme.jar"):
        print("Specified file is not named ripme.jar, renaming")
        new_path = os.path.join(os.path.dirname(path), "ripme.jar")
        os.rename(path, new_path)
        return new_path
    return path


ripmeJson = json.loads(open("ripme.json").read())
fileToUploadPath = args.file
fileToUploadPath = checkAndRenameFile(args.file)
InNoninteractiveMode = args.non_interactive
commitMessage = ripmeJson.get("changeList")[0]
releaseVersion = ripmeJson.get("latestVersion")

@@ -61,22 +75,27 @@ if not isValidCommitMessage(commitMessage):
    print("[!] Error: {} is not a valid commit message as it does not start with a version".format(fileToUploadPath))
    sys.exit(1)

ripmeUpdate = open(fileToUploadPath, mode='rb').read()

# The hash that we expect the update to have
expectedHash = ripmeJson.get("currentHash")
if not args.skip_hash_check:
    if debug:
        print("Reading file {}".format(fileToUploadPath))
    ripmeUpdate = open(fileToUploadPath, mode='rb').read()

# The actual hash of the file on disk
actualHash = sha256(ripmeUpdate).hexdigest()
    # The actual hash of the file on disk
    actualHash = sha256(ripmeUpdate).hexdigest()

# Make sure that the hash of the file we're uploading matches the hash in ripme.json. These hashes not matching will
# cause ripme to refuse to install the update for all users who haven't disabled update hash checking
if expectedHash != actualHash:
    # The hash that we expect the update to have
    expectedHash = ripmeJson.get("currentHash")

    # Make sure that the hash of the file we're uploading matches the hash in ripme.json. These hashes not matching will
    # cause ripme to refuse to install the update for all users who haven't disabled update hash checking
    if expectedHash != actualHash:
        print("[!] Error: expected hash of file and actual hash differ")
        print("[!] Expected hash is {}".format(expectedHash))
        print("[!] Actual hash is {}".format(actualHash))
        sys.exit(1)

else:
    print("[*] WARNING: SKIPPING HASH CHECK")
# Ask the user to review the information before we precede
# This only runs in we're in interactive mode
if not InNoninteractiveMode:

@@ -85,12 +104,14 @@ if not InNoninteractiveMode:
    print("Repo: {}/{}".format(repoOwner, repoName))
    input("\nPlease review the information above and ensure it is correct and then press enter")

print("Accessing github using token")
g = Github(accessToken)
if not args.test:
    print("Accessing github using token")
    g = Github(accessToken)

print("Creating release")
release = g.get_user(repoOwner).get_repo(repoName).create_git_release(releaseVersion, commitMessage, "")

    print("Creating release")
    release = g.get_user(repoOwner).get_repo(repoName).create_git_release(releaseVersion, commitMessage, "")

print("Uploading file")
release.upload_asset(fileToUploadPath, "ripme.jar")
    print("Uploading file")
    release.upload_asset(fileToUploadPath, "ripme.jar")
else:
    print("Not uploading release being script was run with --test flag")
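The reworked hash check reads the jar only when the check is enabled and compares its SHA-256 digest against currentHash from ripme.json; a mismatch aborts the release, because clients make the same comparison before installing an update. A minimal standalone sketch of that digest step on the Java side (the file path is illustrative; the expected hash is the one this commit writes into ripme.json):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class HashCheckSketch {
    // Hex-encoded SHA-256 of a file, mirroring sha256(ripmeUpdate).hexdigest() in release.py.
    static String sha256Hex(Path path) throws Exception {
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(Files.readAllBytes(path));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        String expectedHash = "f206e478822134328763fc41676f438ee5cc795f31984613619952dab8402301";
        String actualHash = sha256Hex(Paths.get("ripme.jar"));
        if (!expectedHash.equals(actualHash)) {
            System.err.println("[!] Error: expected hash of file and actual hash differ");
            System.exit(1);
        }
    }
}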
13  ripme.json

@@ -1,6 +1,14 @@
{
    "currentHash": "aadb71bf5cdf46fe92e270b50a55c8d8d7200a6dd304a4c2ac9f68cddc687d7e",
    "changeList": [
        "1.7.60: Fixed EightmusesRipper; added Jab Archives ripper; loveroms ripper now properly names files; Fixed ArtStationRipper",
        "1.7.59: Added Loverom ripper; added Imagearn ripper; Added support for Desuarchive.org; Fixed erome ripper",
        "1.7.58: Fixed Deviantart ripper; fixed HitomiRipper; Fixed ManganeloRipper; Fixed update box formating",
        "1.7.57: Got DeviantartRipper working again; Imagefap ripper now downloads full sized images; Twitter ripper can now rip extended tweets; Added nl_NL translation",
        "1.7.56: Fixed DeviantartRipper ripper; Added support for resuming file downloads; Fixed erome ripper; Fixed ModelmayhemRipper NSFW image downloading",
        "1.7.55: Fixed instagram ripper; Reddit ripper now respects history.end_rip_after_already_seen; Improvements to patch.py and release.py",
        "1.7.54: Fixed twitter ripper video downloading; fixed instagram ripper",
        "1.7.53: Added Picstatio ripper; Fixed instagram ripper; Reddit ripper now gets videos from v.redd.it; Fixed ZikiRipper getAlbumTitle; fixed twitter ripper",
        "1.7.52: Added warning about using furaffinty shared account; Refactoring in Utils class; XhamsterRipper now accepts all countries subdomains; E621 ripper now accepts urls with order:Score at the end; release.py imrpovements; DeviantartRipper now logs in using cookies; patch.py imrpovements",
        "1.7.51: Fixed instagram ripper; Added the ability to rip from vsco profiles; Fixed TheyiffgalleryRipper; Can now update ripme using the -j flag; added script to automate releases; Code style fixes",
        "1.7.50: Ripme now checks file hash before running update; fixed update bug which cased ripme to report every update as new",
        "1.7.49: Fixed -n flag; Added ability to change locale at runtime and from gui; Update kr_KR translation; Removed support for tnbtu.com; No longer writes url to url_history file is save urls only is checked",

@@ -223,5 +231,6 @@
        "1.0.3: Added VK.com ripper",
        "1.0.1: Added auto-update functionality"
    ],
    "latestVersion": "1.7.51"
    "latestVersion": "1.7.60",
    "currentHash": "f206e478822134328763fc41676f438ee5cc795f31984613619952dab8402301"
}
@@ -97,7 +97,7 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {

        while (doc != null) {
            if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
                sendUpdate(STATUS.DOWNLOAD_COMPLETE, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
                sendUpdate(STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip");
                break;
            }
            List<String> imageURLs = getURLsFromPage(doc);

@@ -224,7 +224,6 @@ public abstract class AbstractHTMLRipper extends AlbumRipper {
        if (!subdirectory.equals("")) { // Not sure about this part
            subdirectory = File.separator + subdirectory;
        }
        // TODO Get prefix working again, probably requires reworking a lot of stuff! (Might be fixed now)
        saveFileAs = new File(
                workingDir.getCanonicalPath()
                + subdirectory

@@ -69,7 +69,7 @@ public abstract class AbstractJSONRipper extends AlbumRipper {
            }
        }

        if (imageURLs.isEmpty()) {
        if (imageURLs.isEmpty() && !hasASAPRipping()) {
            throw new IOException("No images found at " + this.url);
        }

@@ -613,4 +613,9 @@ public abstract class AbstractRipper
    protected boolean isThisATest() {
        return thisIsATest;
    }

    // If true ripme uses a byte progress bar
    protected boolean useByteProgessBar() { return false;}
    // If true ripme will try to resume a broken download for this ripper
    protected boolean tryResumeDownload() { return false;}
}
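Both new hooks default to false, so existing rippers are unaffected; a ripper opts into the new download behavior by overriding them. A hypothetical subclass (the class name is illustrative, not from this commit) would opt in like this:

// Hypothetical ripper opting into the new AbstractRipper hooks.
public abstract class LargeFileRipper extends AbstractRipper {
    protected LargeFileRipper(java.net.URL url) throws java.io.IOException {
        super(url);
    }

    @Override
    protected boolean useByteProgessBar() { return true; }   // progress reported in bytes, not item counts

    @Override
    protected boolean tryResumeDownload() { return true; }   // partial files trigger an HTTP Range request
}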
@@ -0,0 +1,43 @@
package com.rarchives.ripme.ripper;

import com.rarchives.ripme.utils.Utils;

import java.io.IOException;
import java.net.URL;


/**
 * This is just an extension of AbstractHTMLRipper that auto overrides a few things
 * to help cut down on copy pasted code
 */
public abstract class AbstractSingleFileRipper extends AbstractHTMLRipper {
    private int bytesTotal = 1;
    private int bytesCompleted = 1;

    protected AbstractSingleFileRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    public String getStatusText() {
        return Utils.getByteStatusText(getCompletionPercentage(), bytesCompleted, bytesTotal);
    }

    @Override
    public int getCompletionPercentage() {
        return (int) (100 * (bytesCompleted / (float) bytesTotal));
    }

    @Override
    public void setBytesTotal(int bytes) {
        this.bytesTotal = bytes;
    }

    @Override
    public void setBytesCompleted(int bytes) {
        this.bytesCompleted = bytes;
    }

    @Override
    public boolean useByteProgessBar() {return true;}
}
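Note that getCompletionPercentage casts through float before truncating; with plain int arithmetic the division would truncate first and report 0% for every partial download. The fields also start at 1 rather than 0, which avoids a zero denominator before setBytesTotal is called. A quick check of the difference:

public class PercentageDemo {
    public static void main(String[] args) {
        int bytesCompleted = 750_000;
        int bytesTotal = 1_000_000;
        System.out.println(100 * (bytesCompleted / bytesTotal));                  // 0: int division truncates first
        System.out.println((int) (100 * (bytesCompleted / (float) bytesTotal)));  // 75: matches the method above
    }
}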
@@ -71,10 +71,7 @@ public abstract class AlbumRipper extends AbstractRipper {
        try (FileWriter fw = new FileWriter(urlFile, true)) {
            fw.write(url.toExternalForm());
            fw.write("\n");

            RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, urlFile);
            itemsCompleted.put(url, new File(urlFile));
            observer.update(this, msg);
        } catch (IOException e) {
            LOGGER.error("Error while writing to " + urlFile, e);
        }

@@ -6,9 +6,11 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Array;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

@@ -59,18 +61,26 @@ class DownloadFileThread extends Thread {
        this.cookies = cookies;
    }


    /**
     * Attempts to download the file. Retries as needed.
     * Notifies observers upon completion/error/warn.
     */
    public void run() {
        long fileSize = 0;
        int bytesTotal = 0;
        int bytesDownloaded = 0;
        if (saveAs.exists() && observer.tryResumeDownload()) {
            fileSize = saveAs.length();
        }
        try {
            observer.stopCheck();
        } catch (IOException e) {
            observer.downloadErrored(url, "Download interrupted");
            return;
        }
        if (saveAs.exists()) {
        if (saveAs.exists() && !observer.tryResumeDownload() && !getFileExtFromMIME ||
                Utils.fuzzyExists(new File(saveAs.getParent()), saveAs.getName()) && getFileExtFromMIME && !observer.tryResumeDownload()) {
            if (Utils.getConfigBoolean("file.overwrite", false)) {
                logger.info("[!] Deleting existing file" + prettySaveAs);
                saveAs.delete();

@@ -80,7 +90,6 @@ class DownloadFileThread extends Thread {
                return;
            }
        }

        URL urlToDownload = this.url;
        boolean redirected = false;
        int tries = 0; // Number of attempts to download

@@ -114,11 +123,20 @@ class DownloadFileThread extends Thread {
                    cookie += key + "=" + cookies.get(key);
                }
                huc.setRequestProperty("Cookie", cookie);
                if (observer.tryResumeDownload()) {
                    if (fileSize != 0) {
                        huc.setRequestProperty("Range", "bytes=" + fileSize + "-");
                    }
                }
                logger.debug("Request properties: " + huc.getRequestProperties());
                huc.connect();

                int statusCode = huc.getResponseCode();
                logger.debug("Status code: " + statusCode);
                if (statusCode != 206 && observer.tryResumeDownload() && saveAs.exists()) {
                    // TODO find a better way to handle servers that don't support resuming downloads then just erroring out
                    throw new IOException("Server doesn't support resuming downloads");
                }
                if (statusCode / 100 == 3) { // 3xx Redirect
                    if (!redirected) {
                        // Don't increment retries on the first redirect

@@ -146,17 +164,63 @@ class DownloadFileThread extends Thread {
                    observer.downloadErrored(url, "Imgur image is 404: " + url.toExternalForm());
                    return;
                }

                // If the ripper is using the bytes progress bar set bytesTotal to huc.getContentLength()
                if (observer.useByteProgessBar()) {
                    bytesTotal = huc.getContentLength();
                    observer.setBytesTotal(bytesTotal);
                    observer.sendUpdate(STATUS.TOTAL_BYTES, bytesTotal);
                    logger.debug("Size of file at " + this.url + " = " + bytesTotal + "b");
                }

                // Save file
                bis = new BufferedInputStream(huc.getInputStream());

                // Check if we should get the file ext from the MIME type
                if (getFileExtFromMIME) {
                    String fileExt = URLConnection.guessContentTypeFromStream(bis).replaceAll("image/", "");
                    String fileExt = URLConnection.guessContentTypeFromStream(bis);
                    if (fileExt != null) {
                        fileExt = fileExt.replaceAll("image/", "");
                        saveAs = new File(saveAs.toString() + "." + fileExt);
                    } else {
                        logger.error("Was unable to get content type from stream");
                        // Try to get the file type from the magic number
                        byte[] magicBytes = new byte[8];
                        bis.read(magicBytes,0, 5);
                        bis.reset();
                        fileExt = Utils.getEXTFromMagic(magicBytes);
                        if (fileExt != null) {
                            saveAs = new File(saveAs.toString() + "." + fileExt);
                        } else {
                            logger.error("Was unable to get content type using magic number");
                            logger.error("Magic number was: " + Arrays.toString(magicBytes));
                        }

                    }
                }
                // If we're resuming a download we append data to the existing file
                if (statusCode == 206) {
                    fos = new FileOutputStream(saveAs, true);
                } else {
                    fos = new FileOutputStream(saveAs);
                    IOUtils.copy(bis, fos);
                }
                byte[] data = new byte[1024 * 256];
                int bytesRead;
                while ( (bytesRead = bis.read(data)) != -1) {
                    try {
                        observer.stopCheck();
                    } catch (IOException e) {
                        observer.downloadErrored(url, "Download interrupted");
                        return;
                    }
                    fos.write(data, 0, bytesRead);
                    if (observer.useByteProgessBar()) {
                        bytesDownloaded += bytesRead;
                        observer.setBytesCompleted(bytesDownloaded);
                        observer.sendUpdate(STATUS.COMPLETED_BYTES, bytesDownloaded);
                    }
                }
                bis.close();
                fos.close();
                break; // Download successful: break out of infinite loop
            } catch (HttpStatusException hse) {
                logger.debug("HTTP status exception", hse);
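The resume path above combines three pieces: a Range header built from the existing file's length, a hard failure when the server ignores the range (anything other than 206 Partial Content would restart the body from byte 0 and corrupt an appended file), and a FileOutputStream opened in append mode. A self-contained sketch of the same flow, stripped of the ripper plumbing (URL and file name are illustrative):

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class ResumeDownloadSketch {
    public static void main(String[] args) throws IOException {
        File saveAs = new File("ripme.jar");                 // illustrative target file
        URL url = new URL("https://example.com/ripme.jar");  // illustrative URL
        long fileSize = saveAs.exists() ? saveAs.length() : 0;

        HttpURLConnection huc = (HttpURLConnection) url.openConnection();
        if (fileSize != 0) {
            // Ask the server for everything after what we already have.
            huc.setRequestProperty("Range", "bytes=" + fileSize + "-");
        }
        huc.connect();

        int statusCode = huc.getResponseCode();
        if (fileSize != 0 && statusCode != 206) {
            throw new IOException("Server doesn't support resuming downloads");
        }

        // 206 Partial Content: the body starts at fileSize, so append to the file.
        try (InputStream bis = new BufferedInputStream(huc.getInputStream());
             OutputStream fos = new FileOutputStream(saveAs, statusCode == 206)) {
            byte[] data = new byte[1024 * 256];
            int bytesRead;
            while ((bytesRead = bis.read(data)) != -1) {
                fos.write(data, 0, bytesRead);
            }
        }
    }
}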
@@ -76,7 +76,8 @@ class DownloadVideoThread extends Thread {
        int tries = 0; // Number of attempts to download
        do {
            InputStream bis = null; OutputStream fos = null;
            byte[] data = new byte[1024 * 256]; int bytesRead;
            byte[] data = new byte[1024 * 256];
            int bytesRead;
            try {
                logger.info(" Downloading file: " + url + (tries > 0 ? " Retry #" + tries : ""));
                observer.sendUpdate(STATUS.DOWNLOAD_STARTED, url.toExternalForm());
@@ -0,0 +1,270 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.utils.Http;

import org.json.JSONObject;

public class ArtStationRipper extends AbstractJSONRipper {
    enum URL_TYPE {
        SINGLE_PROJECT, USER_PORTFOLIO, UNKNOWN
    }

    private ParsedURL albumURL;
    private String projectName;
    private Integer projectIndex;
    private Integer projectPageNumber;

    public ArtStationRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    protected String getDomain() {
        return "artstation.com";
    }

    @Override
    public String getHost() {
        return "ArtStation";
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        JSONObject groupData;

        // Parse URL and store for later use
        albumURL = parseURL(url);

        if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) {
            // URL points to single project, use project title as GID
            try {
                groupData = Http.url(albumURL.getLocation()).getJSON();
            } catch (IOException e) {
                throw new MalformedURLException("Couldn't load JSON from " + albumURL.getLocation());
            }
            return groupData.getString("title");
        }

        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // URL points to user portfolio, use user's full name as GID
            String userInfoURL = "https://www.artstation.com/users/" + albumURL.getID() + "/quick.json";
            try {
                groupData = Http.url(userInfoURL).getJSON();
            } catch (IOException e) {
                throw new MalformedURLException("Couldn't load JSON from " + userInfoURL);
            }
            return groupData.getString("full_name");
        }

        // No JSON found in the URL entered, can't rip
        throw new MalformedURLException(
                "Expected URL to an ArtStation project or user profile - got " + url + " instead");
    }

    @Override
    protected JSONObject getFirstPage() throws IOException {
        if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) {
            // URL points to JSON of a single project, just return it
            return Http.url(albumURL.getLocation()).getJSON();
        }

        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // URL points to JSON of a list of projects, load it to parse individual
            // projects
            JSONObject albumContent = Http.url(albumURL.getLocation()).getJSON();

            if (albumContent.getInt("total_count") > 0) {
                // Get JSON of the first project and return it
                JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(0);
                ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink")));
                return Http.url(projectURL.getLocation()).getJSON();
            }
        }

        throw new IOException("URL specified points to an user with empty portfolio");
    }

    @Override
    protected JSONObject getNextPage(JSONObject doc) throws IOException {
        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // Initialize the page number if it hasn't been initialized already
            if (projectPageNumber == null) {
                projectPageNumber = 1;
            }

            // Each page holds a maximum of 50 projects. Initialize the index if it hasn't
            // been initialized already or increment page number and reset the index if all
            // projects of the current page were already processed
            if (projectIndex == null) {
                projectIndex = 0;
            } else if (projectIndex > 49) {
                projectPageNumber++;
                projectIndex = 0;
            }

            Integer currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 1);
            JSONObject albumContent = Http.url(albumURL.getLocation() + "?page=" + projectPageNumber).getJSON();

            if (albumContent.getInt("total_count") > currentProject) {
                // Get JSON of the next project and return it
                JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(projectIndex);
                ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink")));
                projectIndex++;
                return Http.url(projectURL.getLocation()).getJSON();
            }

            throw new IOException("No more projects");
        }

        throw new IOException("Downloading a single project");
    }

    @Override
    protected List<String> getURLsFromJSON(JSONObject json) {
        List<String> assetURLs = new ArrayList<>();
        JSONObject currentObject;

        // Update project name variable from JSON data. Used by downloadURL() to create
        // subfolders when input URL is URL_TYPE.USER_PORTFOLIO
        projectName = json.getString("title");

        for (int i = 0; i < json.getJSONArray("assets").length(); i++) {
            currentObject = json.getJSONArray("assets").getJSONObject(i);

            if (!currentObject.getString("image_url").isEmpty()) {
                // TODO: Find a way to rip external content.
                // ArtStation hosts only image content, everything else (videos, 3D Models, etc)
                // is hosted in other websites and displayed through embedded HTML5 players
                assetURLs.add(currentObject.getString("image_url"));
            }
        }

        return assetURLs;
    }

    @Override
    protected void downloadURL(URL url, int index) {
        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
            // Replace not allowed characters with underlines
            String folderName = projectName.replaceAll("[\\\\/:*?\"<>|]", "_");

            // Folder name also can't end with dots or spaces, strip them
            folderName = folderName.replaceAll("\\s+$", "");
            folderName = folderName.replaceAll("\\.+$", "");

            // Downloading multiple projects, separate each one in subfolders
            addURLToDownload(url, "", folderName);
        } else {
            addURLToDownload(url);
        }
    }

    @Override
    public String normalizeUrl(String url) {
        // Strip URL parameters
        return url.replaceAll("\\?\\w+$", "");
    }

    private static class ParsedURL {
        URL_TYPE urlType;
        String jsonURL, urlID;

        /**
         * Construct a new ParsedURL object.
         *
         * @param urlType URL_TYPE enum containing the URL type
         * @param jsonURL String containing the JSON URL location
         * @param urlID   String containing the ID of this URL
         *
         */
        ParsedURL(URL_TYPE urlType, String jsonURL, String urlID) {
            this.urlType = urlType;
            this.jsonURL = jsonURL;
            this.urlID = urlID;
        }

        /**
         * Get URL Type of this ParsedURL object.
         *
         * @return URL_TYPE enum containing this object type
         *
         */
        URL_TYPE getType() {
            return this.urlType;
        }

        /**
         * Get JSON location of this ParsedURL object.
         *
         * @return String containing the JSON URL
         *
         */
        String getLocation() {
            return this.jsonURL;
        }

        /**
         * Get ID of this ParsedURL object.
         *
         * @return For URL_TYPE.SINGLE_PROJECT, returns the project hash. For
         *         URL_TYPE.USER_PORTFOLIO, returns the account name
         */
        String getID() {
            return this.urlID;
        }
    }

    /**
     * Parses an ArtStation URL.
     *
     * @param url URL to an ArtStation user profile
     *            (https://www.artstation.com/username) or single project
     *            (https://www.artstation.com/artwork/projectid)
     * @return ParsedURL object containing URL type, JSON location and ID (stores
     *         account name or project hash, depending of the URL type identified)
     *
     */
    private ParsedURL parseURL(URL url) {
        String htmlSource;
        ParsedURL parsedURL;

        // Load HTML Source of the specified URL
        try {
            htmlSource = Http.url(url).get().html();
        } catch (IOException e) {
            htmlSource = "";
        }

        // Check if HTML Source of the specified URL references a project
        Pattern p = Pattern.compile("'/projects/(\\w+)\\.json'");
        Matcher m = p.matcher(htmlSource);
        if (m.find()) {
            parsedURL = new ParsedURL(URL_TYPE.SINGLE_PROJECT,
                    "https://www.artstation.com/projects/" + m.group(1) + ".json", m.group(1));
            return parsedURL;
        }

        // Check if HTML Source of the specified URL references a user profile
        p = Pattern.compile("'/users/([\\w-]+)/quick\\.json'");
        m = p.matcher(htmlSource);
        if (m.find()) {
            parsedURL = new ParsedURL(URL_TYPE.USER_PORTFOLIO,
                    "https://www.artstation.com/users/" + m.group(1) + "/projects.json", m.group(1));
            return parsedURL;
        }

        // HTML Source of the specified URL doesn't reference a user profile or project
        parsedURL = new ParsedURL(URL_TYPE.UNKNOWN, null, null);
        return parsedURL;
    }

}
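getNextPage treats the portfolio listing as pages of at most 50 projects and flattens (page, index) into a 1-based ordinal before comparing it against total_count. A worked check of that arithmetic:

public class PaginationDemo {
    public static void main(String[] args) {
        // Mirrors: Integer currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 1);
        int projectPageNumber = 3;
        int projectIndex = 7;  // 0-based position within the current page
        int currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 1);
        System.out.println(currentProject); // 108: the 8th project on page 3, counting from project 1
    }
}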
@@ -19,7 +19,7 @@ import com.rarchives.ripme.utils.RipUtils;

public class ChanRipper extends AbstractHTMLRipper {
    private static List<ChanSite> explicit_domains = Arrays.asList(
            new ChanSite(Arrays.asList("boards.4chan.org"), Arrays.asList("4cdn.org", "is.4chan.org", "is2.4chan.org")),
            new ChanSite(Arrays.asList("boards.4chan.org"), Arrays.asList("4cdn.org", "is.4chan.org", "is2.4chan.org", "is3.4chan.org")),
            new ChanSite(Arrays.asList("4archive.org"), Arrays.asList("imgur.com")),
            new ChanSite(Arrays.asList("archive.4plebs.org"), Arrays.asList("img.4plebs.org"))
    );

@@ -85,8 +85,22 @@ public class ChanRipper extends AbstractHTMLRipper {
                return true;
            }
        }
        return url.toExternalForm().contains("/res/") // Most chans
            || url.toExternalForm().contains("/thread/"); // 4chan, archive.moe
        if (url.toExternalForm().contains("desuchan.net") && url.toExternalForm().contains("/res/")) {
            return true;
        }
        if (url.toExternalForm().contains("boards.420chan.org") && url.toExternalForm().contains("/res/")) {
            return true;
        }
        if (url.toExternalForm().contains("7chan.org") && url.toExternalForm().contains("/res/")) {
            return true;
        }
        if (url.toExternalForm().contains("xchan.pw") && url.toExternalForm().contains("/board/")) {
            return true;
        }
        if (url.toExternalForm().contains("desuarchive.org")) {
            return true;
        }
        return false;
    }

    /**

@@ -28,7 +28,7 @@ public class CheveretoRipper extends AbstractHTMLRipper {
        super(url);
    }

    private static List<String> explicit_domains_1 = Arrays.asList("tag-fox.com");
    private static List<String> explicit_domains = Arrays.asList("tag-fox.com", "kenzato.uk");

    @Override
    public String getHost() {

@@ -43,13 +43,9 @@ public class CheveretoRipper extends AbstractHTMLRipper {
    @Override
    public boolean canRip(URL url) {
        String url_name = url.toExternalForm();
        if (explicit_domains_1.contains(url_name.split("/")[2])) {
            Pattern pa = Pattern.compile("(?:https?://)?(?:www\\.)?[a-z1-9-]*\\.[a-z1-9]*/album/([a-zA-Z1-9]*)/?$");
            Matcher ma = pa.matcher(url.toExternalForm());
            if (ma.matches()) {
        if (explicit_domains.contains(url_name.split("/")[2])) {
            return true;
        }
        }
        return false;
    }

@@ -70,7 +66,7 @@ public class CheveretoRipper extends AbstractHTMLRipper {

    @Override
    public String getGID(URL url) throws MalformedURLException {
        Pattern p = Pattern.compile("(?:https?://)?(?:www\\.)?[a-z1-9-]*\\.[a-z1-9]*/album/([a-zA-Z1-9]*)/?$");
        Pattern p = Pattern.compile("(?:https?://)?(?:www\\.)?[a-z1-9-]*\\.[a-z1-9]*(?:[a-zA-Z1-9]*)/album/([a-zA-Z1-9]*)/?$");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1);
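The widened getGID pattern still extracts the album slug as group 1; note the character classes use 1-9, so host names or slugs containing a 0 will not match. A quick check against one of the newly whitelisted hosts (the album ID is made up):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CheveretoGidDemo {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("(?:https?://)?(?:www\\.)?[a-z1-9-]*\\.[a-z1-9]*(?:[a-zA-Z1-9]*)/album/([a-zA-Z1-9]*)/?$");
        Matcher m = p.matcher("https://kenzato.uk/album/Abc123");
        if (m.matches()) {
            System.out.println(m.group(1)); // Abc123
        }
    }
}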
@@ -1,8 +1,9 @@
package com.rarchives.ripme.ripper.rippers;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.utils.Base64;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.RipUtils;
import com.rarchives.ripme.utils.Utils;
import java.io.IOException;
import java.net.HttpURLConnection;

@@ -17,15 +18,23 @@ import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.Connection.Method;

import org.json.JSONArray;
import org.json.JSONObject;
import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.safety.Whitelist;
import org.jsoup.select.Elements;

public class DeviantartRipper extends AbstractHTMLRipper {

public class DeviantartRipper extends AbstractJSONRipper {
    String requestID;
    String galleryID;
    String username;
    String baseApiUrl = "https://www.deviantart.com/dapi/v1/gallery/";
    String csrf;
    Map<String, String> pageCookies = new HashMap<>();

    private static final int PAGE_SLEEP_TIME = 3000,
                             IMAGE_SLEEP_TIME = 2000;

@@ -37,31 +46,37 @@ public class DeviantartRipper extends AbstractHTMLRipper {
        super(url);
    }

    String loginCookies = "auth=__0f9158aaec09f417b235%3B%221ff79836392a515d154216d919eae573%22;" +
            "auth_secure=__41d14dd0da101f411bb0%3B%2281cf2cf9477776162a1172543aae85ce%22;" +
            "userinfo=__bf84ac233bfa8ae642e8%3B%7B%22username%22%3A%22grabpy%22%2C%22uniqueid%22%3A%22a0a876aa37dbd4b30e1c80406ee9c280%22%2C%22vd%22%3A%22BbHUXZ%2CBbHUXZ%2CA%2CU%2CA%2C%2CB%2CA%2CB%2CBbHUXZ%2CBbHUdj%2CL%2CL%2CA%2CBbHUdj%2C13%2CA%2CB%2CA%2C%2CA%2CA%2CB%2CA%2CA%2C%2CA%22%2C%22attr%22%3A56%7D";

    @Override
    public String getHost() {
        return "deviantart";
    }

    @Override
    public String getDomain() {
        return "deviantart.com";
    }
    @Override
    public boolean hasDescriptionSupport() {
        return true;
    }

    @Override
    public URL sanitizeURL(URL url) throws MalformedURLException {
        String u = url.toExternalForm();
        if (u.contains("/gallery/")) {
            return url;
        }

        if (u.replace("/", "").endsWith(".deviantart.com")) {
            // Root user page, get all albums
            if (!u.endsWith("/gallery/") && !u.endsWith("/gallery")) {
                if (!u.endsWith("/")) {
                    u += "/";
            u += "/gallery/";
                } else {
                    u += "gallery/";
                }
            u += "gallery/?";
        }

        Pattern p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/favou?rites/([0-9]+)/*?$");

        Pattern p = Pattern.compile("^https?://www\\.deviantart\\.com/([a-zA-Z0-9\\-]+)/favou?rites/([0-9]+)/*?$");
        Matcher m = p.matcher(url.toExternalForm());
        if (!m.matches()) {
            String subdir = "/";

@@ -75,7 +90,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {

    @Override
    public String getGID(URL url) throws MalformedURLException {
        Pattern p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com(/gallery)?/?(\\?.*)?$");
        Pattern p = Pattern.compile("^https?://www\\.deviantart\\.com/([a-zA-Z0-9\\-]+)(/gallery)?/?(\\?.*)?$");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            // Root gallery

@@ -86,24 +101,53 @@ public class DeviantartRipper extends AbstractHTMLRipper {
            return m.group(1);
        }
        }
        p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/gallery/([0-9]+).*$");
        p = Pattern.compile("^https?://www\\.deviantart\\.com/([a-zA-Z0-9\\-]+)/gallery/([0-9]+).*$");
        m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            // Subgallery
            return m.group(1) + "_" + m.group(2);
        }
        p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/favou?rites/([0-9]+)/.*?$");
        p = Pattern.compile("^https?://www\\.deviantart\\.com/([a-zA-Z0-9\\-]+)/favou?rites/([0-9]+)/.*?$");
        m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1) + "_faves_" + m.group(2);
        }
        p = Pattern.compile("^https?://([a-zA-Z0-9\\-]+)\\.deviantart\\.com/favou?rites/?$");
        p = Pattern.compile("^https?://www\\.deviantart\\.com/([a-zA-Z0-9\\-]+)/favou?rites/?$");
        m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            // Subgallery
            return m.group(1) + "_faves";
        }
        throw new MalformedURLException("Expected URL format: http://username.deviantart.com/[/gallery/#####], got: " + url);
        throw new MalformedURLException("Expected URL format: http://www.deviantart.com/username[/gallery/#####], got: " + url);
    }

    private String getUsernameFromURL(String u) {
        Pattern p = Pattern.compile("^https?://www\\.deviantart\\.com/([a-zA-Z0-9\\-]+)/gallery/?(\\S+)?");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1);
        }
        return null;

    }

    private String getFullsizedNSFWImage(String pageURL) {
        try {
            Document doc = Http.url(pageURL).cookies(cookies).get();
            String imageToReturn = "";
            String[] d = doc.select("img").attr("srcset").split(",");

            String s = d[d.length -1].split(" ")[0];
            LOGGER.info("2:" + s);

            if (s == null || s.equals("")) {
                LOGGER.error("Could not find full sized image at " + pageURL);
            }
            return s;
        } catch (IOException e) {
            LOGGER.error("Could not find full sized image at " + pageURL);
            return null;
        }
    }

    /**

@@ -115,151 +159,133 @@ public class DeviantartRipper extends AbstractHTMLRipper {
     * @throws IOException
     */
    @Override
    public Document getFirstPage() throws IOException {
    public JSONObject getFirstPage() throws IOException {

        //Test to see if there is a login:
        String username = Utils.getConfigString("deviantart.username", new String(Base64.decode("Z3JhYnB5")));
        String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
        // Base64 da login
        // username: Z3JhYnB5
        // password: ZmFrZXJz

        if (username == null || password == null) {
            LOGGER.debug("No DeviantArt login provided.");
            cookies.put("agegate_state","1"); // Bypasses the age gate
        } else {
            // Attempt Login
            try {
                cookies = loginToDeviantart();
            } catch (IOException e) {
                LOGGER.warn("Failed to login: ", e);

        cookies = getDACookies();
        if (cookies.isEmpty()) {
            LOGGER.warn("Failed to get login cookies");
            cookies.put("agegate_state","1"); // Bypasses the age gate
        }
        }
        cookies.put("agegate_state", "1");


        return Http.url(this.url)
        Response res = Http.url(this.url)
                .cookies(cookies)
                .get();
                .response();
        Document page = res.parse();

        JSONObject firstPageJSON = getFirstPageJSON(page);
        requestID = firstPageJSON.getJSONObject("dapx").getString("requestid");
        galleryID = getGalleryID(page);
        username = getUsernameFromURL(url.toExternalForm());
        csrf = firstPageJSON.getString("csrf");
        pageCookies = res.cookies();

        return requestPage(0, galleryID, username, requestID, csrf, pageCookies);
    }

    /**
     *
     * @param page
     * @param id
     * @return
     */
    private String jsonToImage(Document page, String id) {
        Elements js = page.select("script[type=\"text/javascript\"]");
        for (Element tag : js) {
            if (tag.html().contains("window.__pageload")) {
    private JSONObject requestPage(int offset, String galleryID, String username, String requestID, String csfr, Map<String, String> c) {
        LOGGER.debug("offset: " + Integer.toString(offset));
        LOGGER.debug("galleryID: " + galleryID);
        LOGGER.debug("username: " + username);
        LOGGER.debug("requestID: " + requestID);
        String url = baseApiUrl + galleryID + "?iid=" + requestID;
        try {
            String script = tag.html();
            script = script.substring(script.indexOf("window.__pageload"));
            if (!script.contains(id)) {
                continue;
            Document doc = Http.url(url).cookies(c).data("username", username).data("offset", Integer.toString(offset))
                    .data("limit", "24").data("_csrf", csfr).data("id", requestID)
                    .ignoreContentType().post();
            return new JSONObject(doc.body().text());
        } catch (IOException e) {
            LOGGER.error("Got error trying to get page: " + e.getMessage());
            e.printStackTrace();
            return null;
        }
            script = script.substring(script.indexOf(id));
            // first },"src":"url" after id
            script = script.substring(script.indexOf("},\"src\":\"") + 9, script.indexOf("\",\"type\""));
            return script.replace("\\/", "/");
        } catch (StringIndexOutOfBoundsException e) {
            LOGGER.debug("Unable to get json link from " + page.location());


    }

    private JSONObject getFirstPageJSON(Document doc) {
        for (Element js : doc.select("script")) {
            if (js.html().contains("requestid")) {
                String json = js.html().replaceAll("window.__initial_body_data=", "").replaceAll("\\);", "")
                        .replaceAll(";__wake\\(.+", "");
                LOGGER.info("json: " + json);
                JSONObject j = new JSONObject(json);
                return j;
            }
        }
        return null;
    }

    public String getGalleryID(Document doc) {
        // If the url contains catpath we return 0 as the DA api will provide all galery images if you sent the
        // gallery id to 0
        if (url.toExternalForm().contains("catpath=")) {
            return "0";
        }
        Pattern p = Pattern.compile("^https?://www\\.deviantart\\.com/[a-zA-Z0-9\\-]+/gallery/([0-9]+)/?\\S+");
        Matcher m = p.matcher(url.toExternalForm());
        if (m.matches()) {
            return m.group(1);
        }
        for (Element el : doc.select("input[name=set]")) {
            try {
                String galleryID = el.attr("value");
                return galleryID;
            } catch (NullPointerException e) {
                continue;
            }
        }
        LOGGER.error("Could not find gallery ID");
        return null;
    }

    public String getUsername(Document doc) {
        return doc.select("meta[property=og:title]").attr("content")
                .replaceAll("'s DeviantArt gallery", "").replaceAll("'s DeviantArt Gallery", "");
    }


    @Override
    public List<String> getURLsFromPage(Document page) {
    public List<String> getURLsFromJSON(JSONObject json) {
        List<String> imageURLs = new ArrayList<>();
        LOGGER.info(json);
        JSONArray results = json.getJSONObject("content").getJSONArray("results");
        for (int i = 0; i < results.length(); i++) {
            Document doc = Jsoup.parseBodyFragment(results.getJSONObject(i).getString("html"));
            if (doc.html().contains("ismature")) {
                LOGGER.info("Downloading nsfw image");
                String nsfwImage = getFullsizedNSFWImage(doc.select("span").attr("href"));
                if (nsfwImage != null && nsfwImage.startsWith("http")) {
                    imageURLs.add(nsfwImage);
                }
            }
            try {
                String imageURL = doc.select("span").first().attr("data-super-full-img");
                if (!imageURL.isEmpty() && imageURL.startsWith("http")) {
                    imageURLs.add(imageURL);
                }
            } catch (NullPointerException e) {
                LOGGER.info(i + " does not contain any images");
            }

        // Iterate over all thumbnails
        for (Element thumb : page.select("div.zones-container span.thumb")) {
            if (isStopped()) {
                break;
            }
            Element img = thumb.select("img").get(0);
            if (img.attr("transparent").equals("false")) {
                continue; // a.thumbs to other albums are invisible
            }
            // Get full-sized image via helper methods
            String fullSize = null;
            if (thumb.attr("data-super-full-img").contains("//orig")) {
                fullSize = thumb.attr("data-super-full-img");
            } else {
                String spanUrl = thumb.attr("href");
                String fullSize1 = jsonToImage(page,spanUrl.substring(spanUrl.lastIndexOf('-') + 1));
                if (fullSize1 == null || !fullSize1.contains("//orig")) {
                    fullSize = smallToFull(img.attr("src"), spanUrl);
                }
                if (fullSize == null && fullSize1 != null) {
                    fullSize = fullSize1;
                }
            }
            if (fullSize == null) {
                if (thumb.attr("data-super-full-img") != null) {
                    fullSize = thumb.attr("data-super-full-img");
                } else if (thumb.attr("data-super-img") != null) {
                    fullSize = thumb.attr("data-super-img");
                } else {
                    continue;
                }
            }
            if (triedURLs.contains(fullSize)) {
                LOGGER.warn("Already tried to download " + fullSize);
                continue;
            }
            triedURLs.add(fullSize);
            imageURLs.add(fullSize);

            if (isThisATest()) {
                // Only need one image for a test
                break;
            }
        }
        return imageURLs;
    }
    @Override
    public List<String> getDescriptionsFromPage(Document page) {
        List<String> textURLs = new ArrayList<>();
        // Iterate over all thumbnails
        for (Element thumb : page.select("div.zones-container span.thumb")) {
            LOGGER.info(thumb.attr("href"));
            if (isStopped()) {
                break;
            }
            Element img = thumb.select("img").get(0);
            if (img.attr("transparent").equals("false")) {
                continue; // a.thumbs to other albums are invisible
            }
            textURLs.add(thumb.attr("href"));

        }
        return textURLs;
    }

    @Override
    public Document getNextPage(Document page) throws IOException {
        if (isThisATest()) {
            return null;
    public JSONObject getNextPage(JSONObject page) throws IOException {
        boolean hasMore = page.getJSONObject("content").getBoolean("has_more");
        if (hasMore) {
            return requestPage(page.getJSONObject("content").getInt("next_offset"), galleryID, username, requestID, csrf, pageCookies);
        }
        Elements nextButtons = page.select("link[rel=\"next\"]");
        if (nextButtons.isEmpty()) {
            if (page.select("link[rel=\"prev\"]").isEmpty()) {
                throw new IOException("No next page found");
            } else {
                throw new IOException("Hit end of pages");
            }
        }
        Element a = nextButtons.first();
        String nextPage = a.attr("href");
        if (nextPage.startsWith("/")) {
            nextPage = "http://" + this.url.getHost() + nextPage;
        }
        if (!sleep(PAGE_SLEEP_TIME)) {
            throw new IOException("Interrupted while waiting to load next page: " + nextPage);
        }
        LOGGER.info("Found next page: " + nextPage);
        return Http.url(nextPage)
                .cookies(cookies)
                .get();

        throw new IOException("No more pages");
    }

    @Override

@@ -300,61 +326,7 @@ public class DeviantartRipper extends AbstractHTMLRipper {
        return result.toString();
    }

    /**
     * Attempts to download description for image.
     * Comes in handy when people put entire stories in their description.
     * If no description was found, returns null.
     * @param url The URL the description will be retrieved from
     * @param page The gallery page the URL was found on
     * @return A String[] with first object being the description, and the second object being image file name if found.
     */
    @Override
    public String[] getDescription(String url,Document page) {
        if (isThisATest()) {
            return null;
        }
        try {
            // Fetch the image page
            Response resp = Http.url(url)
                    .referrer(this.url)
                    .cookies(cookies)
                    .response();
            cookies.putAll(resp.cookies());

            // Try to find the description
            Document documentz = resp.parse();
            Element ele = documentz.select("div.dev-description").first();
            if (ele == null) {
                throw new IOException("No description found");
            }
            documentz.outputSettings(new Document.OutputSettings().prettyPrint(false));
            ele.select("br").append("\\n");
            ele.select("p").prepend("\\n\\n");
            String fullSize = null;
            Element thumb = page.select("div.zones-container span.thumb[href=\"" + url + "\"]").get(0);
            if (!thumb.attr("data-super-full-img").isEmpty()) {
                fullSize = thumb.attr("data-super-full-img");
                String[] split = fullSize.split("/");
                fullSize = split[split.length - 1];
            } else {
                String spanUrl = thumb.attr("href");
                fullSize = jsonToImage(page,spanUrl.substring(spanUrl.lastIndexOf('-') + 1));
                if (fullSize != null) {
                    String[] split = fullSize.split("/");
                    fullSize = split[split.length - 1];
                }
            }
            if (fullSize == null) {
                return new String[] {Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false))};
            }
            fullSize = fullSize.substring(0, fullSize.lastIndexOf("."));
            return new String[] {Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)),fullSize};
            // TODO Make this not make a newline if someone just types \n into the description.
        } catch (IOException ioe) {
            LOGGER.info("Failed to get description at " + url + ": '" + ioe.getMessage() + "'");
            return null;
        }
    }

    /**
     * If largest resolution for image at 'thumb' is found, starts downloading

@@ -426,47 +398,10 @@ public class DeviantartRipper extends AbstractHTMLRipper {
    }

    /**
     * Logs into deviant art. Required to rip full-size NSFW content.
     * Returns DA cookies.
     * @return Map of cookies containing session data.
     */
    private Map<String, String> loginToDeviantart() throws IOException {
        // Populate postData fields
        Map<String,String> postData = new HashMap<>();
        String username = Utils.getConfigString("deviantart.username", new String(Base64.decode("Z3JhYnB5")));
        String password = Utils.getConfigString("deviantart.password", new String(Base64.decode("ZmFrZXJz")));
        if (username == null || password == null) {
            throw new IOException("could not find username or password in config");
        }
        Response resp = Http.url("http://www.deviantart.com/")
                .response();
        for (Element input : resp.parse().select("form#form-login input[type=hidden]")) {
            postData.put(input.attr("name"), input.attr("value"));
        }
        postData.put("username", username);
        postData.put("password", password);
        postData.put("remember_me", "1");

        // Send login request
        resp = Http.url("https://www.deviantart.com/users/login")
                .userAgent(USER_AGENT)
                .data(postData)
                .cookies(resp.cookies())
                .method(Method.POST)
                .response();

        // Assert we are logged in
        if (resp.hasHeader("Location") && resp.header("Location").contains("password")) {
            // Wrong password
            throw new IOException("Wrong password");
        }
        if (resp.url().toExternalForm().contains("bad_form")) {
            throw new IOException("Login form was incorrectly submitted");
        }
        if (resp.cookie("auth_secure") == null ||
                resp.cookie("auth") == null) {
            throw new IOException("No auth_secure or auth cookies received");
        }
        // We are logged in, save the cookies
        return resp.cookies();
    private Map<String, String> getDACookies() {
        return RipUtils.getCookiesFromString(Utils.getConfigString("deviantart.cookies", loginCookies));
    }
}
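The commit drops the interactive login entirely: getDACookies parses a cookie string from the deviantart.cookies config entry (falling back to the hardcoded loginCookies above) via RipUtils.getCookiesFromString. The sketch below shows the kind of "k=v; k2=v2" parsing such a helper has to do; it is illustrative only and not guaranteed to match the real RipUtils implementation:

import java.util.HashMap;
import java.util.Map;

public class CookieStringDemo {
    // Illustrative stand-in for a cookies-from-string helper; not the RipUtils source.
    static Map<String, String> cookiesFromString(String line) {
        Map<String, String> cookies = new HashMap<>();
        for (String pair : line.split(";")) {
            String[] kv = pair.split("=", 2);  // limit 2: cookie values may themselves contain '='
            if (kv.length == 2) {
                cookies.put(kv[0].trim(), kv[1].trim());
            }
        }
        return cookies;
    }

    public static void main(String[] args) {
        System.out.println(cookiesFromString("auth=abc; auth_secure=def; userinfo=ghi"));
    }
}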
@@ -96,40 +96,40 @@ public class E621Ripper extends AbstractHTMLRipper{

    private String getTerm(URL url) throws MalformedURLException{
        if(gidPattern==null)
            gidPattern=Pattern.compile("^https?://(www\\.)?e621\\.net/post/index/[^/]+/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
            gidPattern=Pattern.compile("^https?://(www\\.)?e621\\.net/post/index/[^/]+/([a-zA-Z0-9$_.+!*'():,%\\-]+)(/.*)?(#.*)?$");
        if(gidPatternPool==null)
            gidPatternPool=Pattern.compile("^https?://(www\\.)?e621\\.net/pool/show/([a-zA-Z0-9$_.+!*'(),%-]+)(\\?.*)?(/.*)?(#.*)?$");
            gidPatternPool=Pattern.compile("^https?://(www\\.)?e621\\.net/pool/show/([a-zA-Z0-9$_.+!*'(),%:\\-]+)(\\?.*)?(/.*)?(#.*)?$");

        Matcher m = gidPattern.matcher(url.toExternalForm());
        if(m.matches())
        if(m.matches()) {
            LOGGER.info(m.group(2));
            return m.group(2);
        }

        m = gidPatternPool.matcher(url.toExternalForm());
        if(m.matches())
        if(m.matches()) {
            return m.group(2);
        }

        throw new MalformedURLException("Expected e621.net URL format: e621.net/post/index/1/searchterm - got "+url+" instead");
    }

    @Override
    public String getGID(URL url) throws MalformedURLException {
        try {
            String prefix="";
            if(url.getPath().startsWith("/pool/show/"))
                prefix="pool_";

            return Utils.filesystemSafe(prefix+new URI(getTerm(url)).getPath());
        } catch (URISyntaxException ex) {
            logger.error(ex);
        String prefix="";
        if (url.getPath().startsWith("/pool/show/")) {
            prefix = "pool_";
        }

            throw new MalformedURLException("Expected e621.net URL format: e621.net/post/index/1/searchterm - got "+url+" instead");
        return Utils.filesystemSafe(prefix+getTerm(url));

    }

    @Override
    public URL sanitizeURL(URL url) throws MalformedURLException {
        if(gidPattern2==null)
            gidPattern2=Pattern.compile("^https?://(www\\.)?e621\\.net/post/search\\?tags=([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
            gidPattern2=Pattern.compile("^https?://(www\\.)?e621\\.net/post/search\\?tags=([a-zA-Z0-9$_.+!*'():,%-]+)(/.*)?(#.*)?$");

        Matcher m = gidPattern2.matcher(url.toExternalForm());
        if(m.matches())
@@ -11,6 +11,7 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.utils.Utils;
import org.json.JSONObject;
import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

@@ -116,14 +117,13 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                    image = thumb.attr("data-cfsrc");
                }
                else {
                    String imageHref = thumb.attr("href");
                    if (imageHref.equals("")) continue;
                    if (imageHref.startsWith("/")) {
                        imageHref = "https://www.8muses.com" + imageHref;
                    }
                    // Deobfustace the json data
                    String rawJson = deobfuscateJSON(page.select("script#ractive-public").html()
                            .replaceAll("&gt;", ">").replaceAll("&lt;", "<").replace("&amp;", "&"));
                    JSONObject json = new JSONObject(rawJson);
                    try {
                        LOGGER.info("Retrieving full-size image location from " + imageHref);
                        image = getFullSizeImage(imageHref);
                        for (int i = 0; i != json.getJSONArray("pictures").length(); i++) {
                            image = "https://www.8muses.com/image/fm/" + json.getJSONArray("pictures").getJSONObject(i).getString("publicUri");
                            URL imageUrl = new URL(image);
                            if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
                                addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "", null, true);

@@ -132,9 +132,9 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                            }
                            // X is our page index
                            x++;
                        }

                    } catch (IOException e) {
                        LOGGER.error("Failed to get full-size image from " + imageHref);
                        continue;
                    }
                }

@@ -154,7 +154,7 @@ public class EightmusesRipper extends AbstractHTMLRipper {
        sendUpdate(STATUS.LOADING_RESOURCE, imageUrl);
        LOGGER.info("Getting full sized image from " + imageUrl);
        Document doc = new Http(imageUrl).get(); // Retrieve the webpage of the image URL
        String imageName = doc.select("input[id=imageName]").attr("value"); // Select the "input" element from the page
        String imageName = doc.select("div.photo > a > img").attr("src");
        return "https://www.8muses.com/image/fm/" + imageName;
    }

@@ -189,4 +189,25 @@ public class EightmusesRipper extends AbstractHTMLRipper {
    public String getPrefixShort(int index) {
        return String.format("%03d", index);
    }

    private String deobfuscateJSON(String obfuscatedString) {
        StringBuilder deobfuscatedString = new StringBuilder();
        // The first char in one of 8muses obfuscated strings is always ! so we replace it
        for (char ch : obfuscatedString.replaceFirst("!", "").toCharArray()){
            deobfuscatedString.append(deobfuscateChar(ch));
        }
        return deobfuscatedString.toString();
    }

    private String deobfuscateChar(char c) {
        if ((int) c == 32) {
            return fromCharCode(32);
        }
        return fromCharCode(33 + (c + 14) % 94);

    }

    private static String fromCharCode(int... codePoints) {
        return new String(codePoints, 0, codePoints.length);
    }
}
|
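For reference, the per-character mapping above is the self-inverse ROT47 substitution over printable ASCII (33-126): spaces pass through, and a leading "!" marker is stripped before decoding. A minimal standalone sketch; the sample input is made up for illustration:

public class Rot47Demo {
    // Same mapping as deobfuscateChar() above: spaces pass through,
    // every other printable ASCII char is rotated by 47 positions.
    private static String rot47(String s) {
        StringBuilder out = new StringBuilder();
        for (char c : s.toCharArray()) {
            out.append(c == ' ' ? ' ' : (char) (33 + (c + 14) % 94));
        }
        return out.toString();
    }

    public static void main(String[] args) {
        String json = "{\"pictures\": []}";   // hypothetical sample payload
        String obfuscated = rot47(json);
        System.out.println(obfuscated);        // scrambled form
        System.out.println(rot47(obfuscated)); // ROT47 is its own inverse: prints the original
    }
}

Because the cipher is its own inverse, the same routine both obfuscates and deobfuscates.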
@ -22,6 +22,8 @@ import com.rarchives.ripme.utils.Http;
 */
public class EromeRipper extends AbstractHTMLRipper {

boolean rippingProfile;


public EromeRipper (URL url) throws IOException {
super(url);
@ -42,6 +44,27 @@ public class EromeRipper extends AbstractHTMLRipper {
addURLToDownload(url, getPrefix(index));
}

@Override
public boolean hasQueueSupport() {
return true;
}

@Override
public boolean pageContainsAlbums(URL url) {
Pattern pa = Pattern.compile("https?://www.erome.com/([a-zA-Z0-9_-]*)/?");
Matcher ma = pa.matcher(url.toExternalForm());
return ma.matches();
}

@Override
public List<String> getAlbumsToQueue(Document doc) {
List<String> urlsToAddToQueue = new ArrayList<>();
for (Element elem : doc.select("div#albums > div.album > a")) {
urlsToAddToQueue.add(elem.attr("href"));
}
return urlsToAddToQueue;
}

@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
@ -53,6 +76,8 @@ public class EromeRipper extends AbstractHTMLRipper {
} catch (IOException e) {
// Fall back to default album naming convention
LOGGER.info("Unable to find title at " + url);
} catch (NullPointerException e) {
return getHost() + "_" + getGID(url);
}
return super.getAlbumTitle(url);
}
@ -66,21 +91,7 @@ public class EromeRipper extends AbstractHTMLRipper {
@Override
public List<String> getURLsFromPage(Document doc) {
List<String> URLs = new ArrayList<>();
//Pictures
Elements imgs = doc.select("div.img > img.img-front");
for (Element img : imgs) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(imageURL);
}
//Videos
Elements vids = doc.select("div.video > video > source");
for (Element vid : vids) {
String videoURL = vid.attr("src");
URLs.add("https:" + videoURL);
}

return URLs;
return getMediaFromPage(doc);
}

@Override
@ -94,13 +105,13 @@ public class EromeRipper extends AbstractHTMLRipper {

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://www.erome.com/a/([a-zA-Z0-9]*)/?$");
Pattern p = Pattern.compile("^https?://www.erome.com/[ai]/([a-zA-Z0-9]*)/?$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}

p = Pattern.compile("^https?://erome.com/a/([a-zA-Z0-9]*)/?$");
p = Pattern.compile("^https?://www.erome.com/([a-zA-Z0-9_-]+)/?$");
m = p.matcher(url.toExternalForm());

if (m.matches()) {
@ -110,34 +121,15 @@ public class EromeRipper extends AbstractHTMLRipper {
throw new MalformedURLException("erome album not found in " + url + ", expected https://www.erome.com/album");
}

public static List<URL> getURLs(URL url) throws IOException{

Response resp = Http.url(url)
.ignoreContentType()
.response();

Document doc = resp.parse();

List<URL> URLs = new ArrayList<>();
//Pictures
Elements imgs = doc.getElementsByTag("img");
for (Element img : imgs) {
if (img.hasClass("album-image")) {
String imageURL = img.attr("src");
imageURL = "https:" + imageURL;
URLs.add(new URL(imageURL));
private List<String> getMediaFromPage(Document doc) {
List<String> results = new ArrayList<>();
for (Element el : doc.select("img.img-front")) {
results.add("https:" + el.attr("src"));
}
for (Element el : doc.select("source[label=HD]")) {
results.add("https:" + el.attr("src"));
}
//Videos
Elements vids = doc.getElementsByTag("video");
for (Element vid : vids) {
if (vid.hasClass("album-video")) {
Elements source = vid.getElementsByTag("source");
String videoURL = source.first().attr("src");
URLs.add(new URL(videoURL));
}
return results;
}

return URLs;
}
}
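The replacement getMediaFromPage() drives both picture and video extraction through two CSS selectors. Below is a small offline sketch of the same selectors; the markup is invented to mirror what the ripper expects, not copied from erome.com:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class EromeSelectorDemo {
    public static void main(String[] args) {
        // Hypothetical album markup with one image and one HD video source
        String html = "<div class=\"img\"><img class=\"img-front\" src=\"//cdn.example.com/pic.jpg\"></div>"
                + "<div class=\"video\"><video><source label=\"HD\" src=\"//cdn.example.com/vid.mp4\"></video></div>";
        Document doc = Jsoup.parse(html);
        for (Element el : doc.select("img.img-front")) {
            System.out.println("https:" + el.attr("src")); // protocol-relative src made absolute
        }
        for (Element el : doc.select("source[label=HD]")) {
            System.out.println("https:" + el.attr("src"));
        }
    }
}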
@ -12,8 +12,10 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ui.RipStatusMessage;
import com.rarchives.ripme.utils.Utils;
import org.jsoup.Connection.Response;
import org.jsoup.HttpStatusException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
@ -24,13 +26,27 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.DownloadThreadPool;
import com.rarchives.ripme.utils.Http;

import static com.rarchives.ripme.utils.RipUtils.getCookiesFromString;

public class FuraffinityRipper extends AbstractHTMLRipper {

private static final String urlBase = "https://www.furaffinity.net";
private static Map<String,String> cookies = new HashMap<>();
static {
cookies.put("b", "bd5ccac8-51dc-4265-8ae1-7eac685ad667");
cookies.put("a", "7c41b782-d01d-4b0e-b45b-62a4f0b2a369");
private Map<String,String> cookies = new HashMap<>();

private void setCookies() {
if (Utils.getConfigBoolean("furaffinity.login", true)) {
LOGGER.info("Logging in using cookies");
String faCookies = Utils.getConfigString("furaffinity.cookies", "a=897bc45b-1f87-49f1-8a85-9412bc103e7a;b=c8807f36-7a85-4caf-80ca-01c2a2368267");
warnAboutSharedAccount(faCookies);
cookies = getCookiesFromString(faCookies);
}
}

private void warnAboutSharedAccount(String loginCookies) {
if (loginCookies.equals("a=897bc45b-1f87-49f1-8a85-9412bc103e7a;b=c8807f36-7a85-4caf-80ca-01c2a2368267")) {
sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED,
"WARNING: Using the shared furaffinity account exposes both your IP and how many items you downloaded to the other users of the shared account");
}
}

// Thread pool for finding direct image links from "image" pages (html)
@ -61,6 +77,8 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
}
@Override
public Document getFirstPage() throws IOException {
setCookies();
LOGGER.info(Http.url(url).cookies(cookies).get().html());
return Http.url(url).cookies(cookies).get();
}

@ -80,20 +98,34 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
}

private String getImageFromPost(String url) {
sleep(1000);
Document d = null;
try {
LOGGER.info("found url " + Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content"));
return Http.url(url).cookies(cookies).get().select("meta[property=og:image]").attr("content");
} catch (IOException e) {
return "";
d = Http.url(url).cookies(cookies).get();
Elements links = d.getElementsByTag("a");
for (Element link : links) {
if (link.text().equals("Download")) {
LOGGER.info("Found image " + link.attr("href"));
return "https:" + link.attr("href");
}
}
} catch (IOException e) {
return null;
}
return null;
}

@Override
public List<String> getURLsFromPage(Document page) {
List<String> urls = new ArrayList<>();
Elements urlElements = page.select("figure.t-image > b > u > a");
for (Element e : urlElements) {
urls.add(getImageFromPost(urlBase + e.select("a").first().attr("href")));
String urlToAdd = getImageFromPost(urlBase + e.select("a").first().attr("href"));
if (urlToAdd != null) {
if (urlToAdd.startsWith("http")) {
urls.add(urlToAdd);
}
}
}
return urls;
}
@ -200,16 +232,5 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
+ " instead");
}

private class FuraffinityDocumentThread extends Thread {
private URL url;

FuraffinityDocumentThread(URL url) {
super();
this.url = url;
}


}


}
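getImageFromPost() now finds the full-size asset by scanning the submission page for the anchor whose text is "Download". The scan can be tried offline against hand-written markup; the sample HTML below is an assumption, not furaffinity's real page structure:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class DownloadLinkScanDemo {
    public static void main(String[] args) {
        // Invented submission-page fragment: one navigation link, one download link
        String html = "<a href=\"/user/someartist/\">someartist</a>"
                + "<a href=\"//d.example.net/art/full.png\">Download</a>";
        Document d = Jsoup.parse(html);
        for (Element link : d.getElementsByTag("a")) {
            if (link.text().equals("Download")) {
                // Same protocol fix-up as the ripper: the href is protocol-relative
                System.out.println("https:" + link.attr("href"));
            }
        }
    }
}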
@ -1,18 +1,22 @@
package com.rarchives.ripme.ripper.rippers.video;
package com.rarchives.ripme.ripper.rippers;


import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ripper.AbstractSingleFileRipper;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.utils.Http;

public class GfycatRipper extends VideoRipper {

public class GfycatRipper extends AbstractSingleFileRipper {

private static final String HOST = "gfycat.com";

@ -20,9 +24,14 @@ public class GfycatRipper extends VideoRipper {
super(url);
}

@Override
public String getDomain() {
return "gfycat.com";
}

@Override
public String getHost() {
return HOST;
return "gfycat";
}

@Override
@ -37,6 +46,16 @@ public class GfycatRipper extends VideoRipper {
return url;
}

@Override
public Document getFirstPage() throws IOException {
return Http.url(url).get();
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://[wm.]*gfycat\\.com/([a-zA-Z0-9]+).*$");
@ -52,10 +71,15 @@ public class GfycatRipper extends VideoRipper {
}

@Override
public void rip() throws IOException {
String vidUrl = getVideoURL(this.url);
addURLToDownload(new URL(vidUrl), "gfycat_" + getGID(this.url));
waitForThreads();
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<>();
Elements videos = doc.select("source#mp4Source");
String vidUrl = videos.first().attr("src");
if (vidUrl.startsWith("//")) {
vidUrl = "http:" + vidUrl;
}
result.add(vidUrl);
return result;
}

/**
@ -8,13 +8,12 @@ import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ripper.AbstractSingleFileRipper;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class GfycatporntubeRipper extends AbstractHTMLRipper {
public class GfycatporntubeRipper extends AbstractSingleFileRipper {

public GfycatporntubeRipper(URL url) throws IOException {
super(url);
@ -13,10 +13,11 @@ import org.jsoup.nodes.Document;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import org.jsoup.nodes.Element;

public class HitomiRipper extends AbstractHTMLRipper {

String galleryId = "";
private String galleryId = "";

public HitomiRipper(URL url) throws IOException {
super(url);
@ -47,7 +48,7 @@ public class HitomiRipper extends AbstractHTMLRipper {
@Override
public Document getFirstPage() throws IOException {
// Fetching /GALLERYID.js returns a JSON array of all images in the gallery
return Http.url(new URL(url.toExternalForm().replaceAll(".html", ".js"))).ignoreContentType().get();
return Http.url(new URL(url.toExternalForm().replaceAll("hitomi", "ltn.hitomi").replaceAll(".html", ".js"))).ignoreContentType().get();
}


@ -64,6 +65,19 @@ public class HitomiRipper extends AbstractHTMLRipper {
return result;
}

@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
// Attempt to use the album title as part of the GID
Document doc = Http.url(url).get();
return getHost() + "_" + getGID(url) + "_" +
doc.select("title").text().replaceAll(" - Read Online - hentai artistcg \\| Hitomi.la", "");
} catch (IOException e) {
LOGGER.info("Falling back to the default album title");
}
return super.getAlbumTitle(url);
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
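One subtlety in getFirstPage(): String.replaceAll treats its first argument as a regex, so the unescaped dot in ".html" can match any character. It happens to work here, but String.replace performs a literal substitution and avoids the surprise. A tiny sketch of the rewrite with an illustrative URL:

public class HitomiUrlRewriteDemo {
    public static void main(String[] args) {
        String pageUrl = "https://hitomi.la/galleries/123456.html"; // made-up gallery URL
        // replace() substitutes literally, so "." cannot match arbitrary characters
        String jsUrl = pageUrl.replace("hitomi", "ltn.hitomi").replace(".html", ".js");
        System.out.println(jsUrl); // https://ltn.hitomi.la/galleries/123456.js
    }
}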
@ -0,0 +1,112 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class ImagearnRipper extends AbstractHTMLRipper {

public ImagearnRipper(URL url) throws IOException {
super(url);
}

@Override
public String getHost() {
return "imagearn";
}
@Override
public String getDomain() {
return "imagearn.com";
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^.*imagearn.com/+gallery.php\\?id=([0-9]+).*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}
throw new MalformedURLException(
"Expected imagearn.com gallery formats: "
+ "imagearn.com/gallery.php?id=####..."
+ " Got: " + url);
}

public URL sanitizeURL(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^.*imagearn.com/+image.php\\?id=[0-9]+.*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
// URL points to imagearn *image*, not gallery
try {
url = getGalleryFromImage(url);
} catch (Exception e) {
LOGGER.error("[!] " + e.getMessage(), e);
}
}
return url;
}

private URL getGalleryFromImage(URL url) throws IOException {
Document doc = Http.url(url).get();
for (Element link : doc.select("a[href~=^gallery\\.php.*$]")) {
LOGGER.info("LINK: " + link.toString());
if (link.hasAttr("href")
&& link.attr("href").contains("gallery.php")) {
url = new URL("http://imagearn.com/" + link.attr("href"));
LOGGER.info("[!] Found gallery from given link: " + url);
return url;
}
}
throw new IOException("Failed to find gallery at URL " + url);
}

@Override
public Document getFirstPage() throws IOException {
return Http.url(url).get();
}

@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
Document doc = getFirstPage();
String title = doc.select("h3 > strong").first().text(); // profile name
return getHost() + "_" + title + "_" + getGID(url);
} catch (Exception e) {
// Fall back to default album naming convention
LOGGER.warn("Failed to get album title from " + url, e);
}
return super.getAlbumTitle(url);
}

@Override
public List<String> getURLsFromPage(Document doc) {
List<String> imageURLs = new ArrayList<>();
for (Element thumb : doc.select("div#gallery > div > a")) {
String imageURL = thumb.attr("href");
try {
Document imagedoc = new Http("http://imagearn.com/" + imageURL).get();
String image = imagedoc.select("a.thickbox").first().attr("href");
imageURLs.add(image);
} catch (IOException e) {
LOGGER.warn("Was unable to download page: " + imageURL);
}
}
return imageURLs;
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
sleep(1000);
}
}
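The GID pattern above keys on the numeric id parameter of gallery.php. A quick self-contained check of the extraction; the id in the sample URL is invented:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ImagearnGidDemo {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("^.*imagearn.com/+gallery.php\\?id=([0-9]+).*$");
        Matcher m = p.matcher("http://imagearn.com/gallery.php?id=12345"); // sample URL
        if (m.matches()) {
            System.out.println(m.group(1)); // prints 12345
        }
    }
}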
@ -125,11 +125,7 @@ public class ImagefapRipper extends AbstractHTMLRipper {
if (!thumb.hasAttr("src") || !thumb.hasAttr("width")) {
continue;
}
String image = thumb.attr("src");
image = image.replaceAll(
"http://x.*.fap.to/images/thumb/",
"http://fap.to/images/full/");
image = image.replaceAll("w[0-9]+-h[0-9]+/", "");
String image = getFullSizedImage("https://www.imagefap.com" + thumb.parent().attr("href"));
imageURLs.add(image);
if (isThisATest()) {
break;
@ -160,4 +156,13 @@ public class ImagefapRipper extends AbstractHTMLRipper {
return super.getAlbumTitle(url);
}

private String getFullSizedImage(String pageURL) {
try {
Document doc = Http.url(pageURL).get();
return doc.select("img#mainPhoto").attr("src");
} catch (IOException e) {
return null;
}
}

}
@ -76,6 +76,10 @@ public class InstagramRipper extends AbstractJSONRipper {
return url.replaceAll("/[A-Z0-9]{8}/", "/");
}

@Override public boolean hasASAPRipping() {
return true;
}

private List<String> getPostsFromSinglePage(JSONObject json) {
List<String> imageURLs = new ArrayList<>();
JSONArray datas;
@ -231,9 +235,23 @@ public class InstagramRipper extends AbstractJSONRipper {
return imageURL;
}

public String getAfter(JSONObject json) {
try {
return json.getJSONObject("entry_data").getJSONArray("ProfilePage").getJSONObject(0)
.getJSONObject("graphql").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONObject("page_info").getString("end_cursor");
} catch (JSONException e) {
return json.getJSONObject("data").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONObject("page_info").getString("end_cursor");
}
}

@Override
public List<String> getURLsFromJSON(JSONObject json) {
List<String> imageURLs = new ArrayList<>();
if (!url.toExternalForm().contains("/p/")) {
nextPageID = getAfter(json);
}

// get the rhx_gis value so we can get the next page later on
if (rhx_gis == null) {
@ -246,7 +264,8 @@ public class InstagramRipper extends AbstractJSONRipper {
try {
JSONArray profilePage = json.getJSONObject("entry_data").getJSONArray("ProfilePage");
userID = profilePage.getJSONObject(0).getString("logging_page_id").replaceAll("profilePage_", "");
datas = profilePage.getJSONObject(0).getJSONObject("graphql").getJSONObject("user")
datas = json.getJSONObject("entry_data").getJSONArray("ProfilePage").getJSONObject(0)
.getJSONObject("graphql").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges");
} catch (JSONException e) {
datas = json.getJSONObject("data").getJSONObject("user")
@ -300,11 +319,10 @@ public class InstagramRipper extends AbstractJSONRipper {
}
}
} catch (MalformedURLException e) {
LOGGER.info("Got MalformedURLException");
return imageURLs;
}

nextPageID = data.getString("id");

if (isThisATest()) {
break;
}
@ -368,10 +386,11 @@ public class InstagramRipper extends AbstractJSONRipper {
try {
// Sleep for a while to avoid a ban
sleep(2500);
String vars = "{\"id\":\"" + userID + "\",\"first\":50,\"after\":\"" + nextPageID + "\"}";
String vars = "{\"id\":\"" + userID + "\",\"first\":12,\"after\":\"" + nextPageID + "\"}";
String ig_gis = getIGGis(vars);
LOGGER.info(ig_gis);

LOGGER.info("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars);
toreturn = getPage("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" + vars, ig_gis);
if (!pageHasImages(toreturn)) {
throw new IOException("No more pages");
@ -391,6 +410,7 @@ public class InstagramRipper extends AbstractJSONRipper {
}

private boolean pageHasImages(JSONObject json) {
LOGGER.info(json);
int numberOfImages = json.getJSONObject("data").getJSONObject("user")
.getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges").length();
if (numberOfImages == 0) {
@ -451,26 +471,11 @@ public class InstagramRipper extends AbstractJSONRipper {
return null;
}
if (!rippingTag) {
Pattern jsP = Pattern.compile("o},queryId:.([a-zA-Z0-9]+).");
Pattern jsP = Pattern.compile("byUserId\\.get\\(t\\)\\)\\|\\|void 0===r\\?void 0:r\\.pagination},queryId:.([a-zA-Z0-9]+)");
Matcher m = jsP.matcher(sb.toString());
if (m.find()) {
return m.group(1);
}
jsP = Pattern.compile("n.pagination:n},queryId:.([a-zA-Z0-9]+).");
m = jsP.matcher(sb.toString());
if (m.find()) {
return m.group(1);
}
jsP = Pattern.compile("0:n.pagination},queryId:.([a-zA-Z0-9]+).");
m = jsP.matcher(sb.toString());
if (m.find()) {
return m.group(1);
}
jsP = Pattern.compile("o.pagination},queryId:.([a-zA-Z0-9]+).");
m = jsP.matcher(sb.toString());
if (m.find()) {
return m.group(1);
}

} else {
Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
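The GraphQL variables string above is assembled by hand, which is fine while id and after are plain alphanumerics. As an alternative sketch, org.json (already used throughout this ripper) builds the same payload with proper escaping; the values below are made up:

import org.json.JSONObject;

public class GraphqlVarsDemo {
    public static void main(String[] args) {
        String userID = "12345";       // hypothetical user id
        String nextPageID = "QVFCabc"; // hypothetical end_cursor
        JSONObject vars = new JSONObject();
        vars.put("id", userID);
        vars.put("first", 12);
        vars.put("after", nextPageID);
        // Same shape as the hand-built string, with any special characters escaped
        System.out.println(vars.toString());
    }
}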
@ -0,0 +1,78 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class JabArchivesRipper extends AbstractHTMLRipper {

public JabArchivesRipper(URL url) throws IOException {
super(url);
}

@Override
public String getHost() {
return "jabarchives";
}

@Override
public String getDomain() {
return "jabarchives.com";
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://jabarchives.com/main/view/([a-zA-Z0-9_]+).*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
// Return the text contained between () in the regex
return m.group(1);
}
throw new MalformedURLException(
"Expected jabarchives.com URL format: " +
"jabarchives.com/main/view/albumname - got " + url + " instead");
}

@Override
public Document getFirstPage() throws IOException {
// "url" is an instance field of the superclass
return Http.url(url).get();
}

@Override
public Document getNextPage(Document doc) throws IOException {
// Find next page
Elements hrefs = doc.select("a[title=\"Next page\"]");
if (hrefs.isEmpty()) {
throw new IOException("No more pages");
}
String nextUrl = "https://jabarchives.com" + hrefs.first().attr("href");
sleep(500);
return Http.url(nextUrl).get();
}

@Override
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<String>();
for (Element el : doc.select("#contentMain img")) {
result.add("https://jabarchives.com" + el.attr("src").replace("thumb", "large"));
}
return result;
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}
@ -0,0 +1,123 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.utils.Utils;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class LoveromRipper extends AbstractHTMLRipper {

public LoveromRipper(URL url) throws IOException {
super(url);
}

private int bytesTotal = 1;
private int bytesCompleted = 1;
boolean multipart = false;

@Override
public String getHost() {
return "loveroms";
}

@Override
public String getDomain() {
return "loveroms.com";
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("https://www.loveroms.com/download/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)/\\d+");
Matcher m = p.matcher(url.toExternalForm());
if (!m.matches()) {
throw new MalformedURLException("Expected URL format: https://www.loveroms.com/download/CONSOLE/GAME, got: " + url);
}
return m.group(1) + "_" + m.group(2);
}

@Override
public Document getFirstPage() throws IOException {
// "url" is an instance field of the superclass
return Http.url(url).get();
}


@Override
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<>();
String downloadLink = doc.select("a#start_download_link").attr("href");
if (downloadLink != null && !downloadLink.isEmpty()) {
result.add(downloadLink);
} else {
multipart = true;
for (Element el : doc.select("a.multi-file-btn")) {
result.add(el.attr("href"));
}
}
return result;
}

@Override
public void downloadURL(URL url, int index) {
if (multipart) {
addURLToDownload(url, "", "", "", null, null, "7z." + getPrefix(index));
} else {
addURLToDownload(url, "", "", "", null, null, "7z");
}
}

@Override
public String getStatusText() {
if (multipart) {
return super.getStatusText();
}
return String.valueOf(getCompletionPercentage()) +
"% - " +
Utils.bytesToHumanReadable(bytesCompleted) +
" / " +
Utils.bytesToHumanReadable(bytesTotal);
}

@Override
public int getCompletionPercentage() {
if (multipart) {
return super.getCompletionPercentage();
}
return (int) (100 * (bytesCompleted / (float) bytesTotal));
}

@Override
public void setBytesTotal(int bytes) {
this.bytesTotal = bytes;
}

@Override
public void setBytesCompleted(int bytes) {
this.bytesCompleted = bytes;
}

@Override
public boolean useByteProgessBar() {return true;}

@Override
public boolean tryResumeDownload() {return true;}

@Override
public String getPrefix(int index) {
String prefix = "";
if (keepSortOrder() && Utils.getConfigBoolean("download.save_order", true)) {
prefix = String.format("7z.%03d", index);
}
return prefix;
}
}
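getCompletionPercentage() relies on the float cast to avoid integer division: without it, bytesCompleted / bytesTotal would truncate to 0 for any partial download. A one-off check with sample byte counts:

public class ProgressDemo {
    public static void main(String[] args) {
        int bytesCompleted = 750_000;   // sample values, not real download sizes
        int bytesTotal = 3_000_000;
        int pct = (int) (100 * (bytesCompleted / (float) bytesTotal));
        System.out.println(pct + "%"); // prints 25%, not 0%
    }
}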
@ -4,7 +4,9 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@ -16,6 +18,8 @@ import com.rarchives.ripme.utils.Http;

public class ModelmayhemRipper extends AbstractHTMLRipper {

private Map<String,String> cookies = new HashMap<>();

public ModelmayhemRipper(URL url) throws IOException {
super(url);
}
@ -43,8 +47,10 @@ public class ModelmayhemRipper extends AbstractHTMLRipper {

@Override
public Document getFirstPage() throws IOException {
// Bypass NSFW filter
cookies.put("worksafe", "0");
// "url" is an instance field of the superclass
return Http.url(url).get();
return Http.url(url).cookies(cookies).get();
}

@Override
@ -0,0 +1,83 @@
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

public class PicstatioRipper extends AbstractHTMLRipper {

public PicstatioRipper(URL url) throws IOException {
super(url);
}

private String getFullSizedImageFromURL(String fileName) {
try {
LOGGER.info("https://www.picstatio.com/wallpaper/" + fileName + "/download");
return Http.url("https://www.picstatio.com/wallpaper/" + fileName + "/download").get().select("p.text-center > span > a").attr("href");
} catch (IOException e) {
e.printStackTrace();
return null;
}
}

@Override
public String getHost() {
return "picstatio";
}

@Override
public String getDomain() {
return "picstatio.com";
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("https?://www.picstatio.com/([a-zA-Z1-9_-]*)/?$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}
throw new MalformedURLException("Expected picstatio URL format: " +
"www.picstatio.com/ID - got " + url + " instead");
}

@Override
public Document getFirstPage() throws IOException {
// "url" is an instance field of the superclass
return Http.url(url).get();
}

@Override
public Document getNextPage(Document doc) throws IOException {
if (doc.select("a.next_page").first() != null) {
return Http.url("https://www.picstatio.com" + doc.select("a.next_page").attr("href")).get();
}
throw new IOException("No more pages");
}

@Override
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<>();
for (Element e : doc.select("img.img")) {
String imageName = e.parent().attr("href");
LOGGER.info(getFullSizedImageFromURL(imageName.split("/")[2]));
result.add(getFullSizedImageFromURL(imageName.split("/")[2]));
}
return result;
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}
@ -8,6 +8,7 @@ import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.rarchives.ripme.ui.RipStatusMessage;
import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONTokener;
@ -17,6 +18,10 @@ import com.rarchives.ripme.ui.UpdateUtils;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.RipUtils;
import com.rarchives.ripme.utils.Utils;
import org.jsoup.Jsoup;

import javax.swing.text.Document;
import javax.swing.text.Element;

public class RedditRipper extends AlbumRipper {

@ -35,6 +40,10 @@ public class RedditRipper extends AlbumRipper {

private long lastRequestTime = 0;

private Boolean shouldAddURL() {
return (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest());
}

@Override
public boolean canRip(URL url) {
return url.getHost().endsWith(DOMAIN);
@ -61,6 +70,10 @@ public class RedditRipper extends AlbumRipper {
public void rip() throws IOException {
URL jsonURL = getJsonURL(this.url);
while (true) {
if (shouldAddURL()) {
sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images; ending rip");
break;
}
jsonURL = getAndParseAndReturnNext(jsonURL);
if (jsonURL == null || isThisATest() || isStopped()) {
break;
@ -179,6 +192,32 @@ public class RedditRipper extends AlbumRipper {
}
}

private URL parseRedditVideoMPD(String vidURL) {
org.jsoup.nodes.Document doc = null;
try {
doc = Http.url(vidURL + "/DASHPlaylist.mpd").ignoreContentType().get();
int largestHeight = 0;
String baseURL = null;
// Loop over all renditions, find the one with the largest height, and set baseURL to that rendition's base URL
for (org.jsoup.nodes.Element e : doc.select("MPD > Period > AdaptationSet > Representation")) {
String height = e.attr("height");
if (height.equals("")) {
height = "0";
}
if (largestHeight < Integer.parseInt(height)) {
largestHeight = Integer.parseInt(height);
baseURL = doc.select("MPD > Period > AdaptationSet > Representation[height=" + height + "]").select("BaseURL").text();
}
LOGGER.info("H " + e.attr("height") + " W " + e.attr("width"));
}
return new URL(vidURL + "/" + baseURL);
} catch (IOException e) {
e.printStackTrace();
}
return null;

}

private void handleURL(String theUrl, String id) {
URL originalURL;
try {
@ -198,6 +237,11 @@ public class RedditRipper extends AlbumRipper {
savePath += id + "-" + m.group(1) + ".jpg";
addURLToDownload(urls.get(0), new File(savePath));
}
if (url.contains("v.redd.it")) {
String savePath = this.workingDir + File.separator;
savePath += id + "-" + url.split("/")[3] + ".mp4";
addURLToDownload(parseRedditVideoMPD(urls.get(0).toExternalForm()), new File(savePath));
}
else {
addURLToDownload(urls.get(0), id + "-", "", theUrl, null);
}
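parseRedditVideoMPD() picks the rendition with the largest height attribute. Since jsoup tag selectors are case-insensitive, the same selection logic can be exercised offline against a hand-written MPD fragment (real v.redd.it playlists carry more metadata than this):

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class MpdHeightDemo {
    public static void main(String[] args) {
        String mpd = "<MPD><Period><AdaptationSet>"
                + "<Representation height=\"480\"><BaseURL>DASH_480</BaseURL></Representation>"
                + "<Representation height=\"720\"><BaseURL>DASH_720</BaseURL></Representation>"
                + "</AdaptationSet></Period></MPD>";
        Document doc = Jsoup.parse(mpd);
        int largestHeight = 0;
        String baseURL = null;
        for (Element e : doc.select("MPD > Period > AdaptationSet > Representation")) {
            // Missing height attributes count as 0, mirroring the ripper's fallback
            int height = e.attr("height").isEmpty() ? 0 : Integer.parseInt(e.attr("height"));
            if (height > largestHeight) {
                largestHeight = height;
                baseURL = e.select("BaseURL").text();
            }
        }
        System.out.println(baseURL); // prints DASH_720
    }
}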
@ -235,8 +235,12 @@ public class TumblrRipper extends AlbumRipper {
for (int j = 0; j < photos.length(); j++) {
photo = photos.getJSONObject(j);
try {
if (Utils.getConfigBoolean("tumblr.get_raw_image", false)) {
String urlString = photo.getJSONObject("original_size").getString("url").replaceAll("https", "http");
String imageUrl = photo.getJSONObject("original_size").getString("url");
// URLs shorter than 65 chars are skipped because those images can't be grabbed at full size
if (Utils.getConfigBoolean("tumblr.get_raw_image", false) &&
imageUrl.replaceAll("https", "http").length() > 65) {
// We have to change the link to http because tumblr uses an invalid cert for data.tumblr.com
String urlString = imageUrl.replaceAll("https", "http");
urlString = urlString.replaceAll("https?://[a-sA-Z0-9_\\-\\.]*\\.tumblr", "http://data.tumblr");
urlString = urlString.replaceAll("_\\d+\\.", "_raw.");
fileURL = new URL(urlString);
@ -20,6 +20,8 @@ import com.rarchives.ripme.utils.Utils;

public class TwitterRipper extends AlbumRipper {

int downloadUrls = 1;

private static final String DOMAIN = "twitter.com",
HOST = "twitter";

@ -123,15 +125,16 @@ public class TwitterRipper extends AlbumRipper {
.append("&include_entities=true")
.append("&exclude_replies=true")
.append("&trim_user=true")
.append("&include_rts=false")
.append("&count=" + 200);
.append("&count=" + 200)
.append("&tweet_mode=extended");
break;
case SEARCH:
req.append("https://api.twitter.com/1.1/search/tweets.json")
.append("?q=" + this.searchText)
.append("&include_entities=true")
.append("&result_type=recent")
.append("&count=100");
.append("&count=100")
.append("&tweet_mode=extended");
break;
}
if (maxID > 0) {
@ -187,18 +190,32 @@ public class TwitterRipper extends AlbumRipper {
url = media.getString("media_url");
if (media.getString("type").equals("video")) {
JSONArray variants = media.getJSONObject("video_info").getJSONArray("variants");
int largestBitrate = 0;
String urlToDownload = null;
// Loop over all the video variants and find the highest-bitrate one
for (int j = 0; j < variants.length(); j++) {
JSONObject variant = (JSONObject) variants.get(j);
if (variant.has("bitrate") && variant.getInt("bitrate") == 832000) {
addURLToDownload(new URL(variant.getString("url")));
LOGGER.info(variant);
// If the video doesn't have a bitrate it's an m3u8 file we can't download
if (variant.has("bitrate")) {
if (variant.getInt("bitrate") > largestBitrate) {
largestBitrate = variant.getInt("bitrate");
urlToDownload = variant.getString("url");
}
}
}
if (urlToDownload != null) {
addURLToDownload(new URL(urlToDownload), getPrefix(downloadUrls));
downloadUrls++;
} else {
LOGGER.error("URLToDownload was null");
}
parsedCount++;
break;
}
}
} else if (media.getString("type").equals("photo")) {
if (url.contains(".twimg.com/")) {
url += ":orig";
addURLToDownload(new URL(url));
addURLToDownload(new URL(url), getPrefix(downloadUrls));
downloadUrls++;
parsedCount++;
} else {
LOGGER.debug("Unexpected media_url: " + url);
@ -211,6 +228,10 @@ public class TwitterRipper extends AlbumRipper {
return parsedCount;
}

public String getPrefix(int index) {
return String.format("%03d_", index);
}

@Override
public void rip() throws IOException {
getAccessToken();
@ -62,19 +62,21 @@ public class XhamsterRipper extends AbstractHTMLRipper {

@Override
public boolean canRip(URL url) {
Pattern p = Pattern.compile("^https?://[wmde.]*xhamster\\.com/photos/gallery/.*?(\\d+)$");
Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster\\.com/photos/gallery/.*?(\\d+)$");
Matcher m = p.matcher(url.toExternalForm());
return m.matches();
}

@Override
public Document getNextPage(Document doc) throws IOException {
if (!doc.select("a.next").first().attr("href").equals("")) {
if (doc.select("a.next").first() != null) {
if (doc.select("a.next").first().attr("href").startsWith("http")) {
return Http.url(doc.select("a.next").first().attr("href")).get();
} else {
throw new IOException("No more pages");
}
}
throw new IOException("No more pages");

}

@Override
public List<String> getURLsFromPage(Document doc) {
@ -100,4 +102,23 @@ public class XhamsterRipper extends AbstractHTMLRipper {
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}

@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
// Attempt to use album title and username as GID
Document doc = getFirstPage();
Element user = doc.select("a.author").first();
String username = user.text();
String path = url.getPath();
Pattern p = Pattern.compile("^/photos/gallery/(.*)$");
Matcher m = p.matcher(path);
if (m.matches() && !username.isEmpty()) {
return getHost() + "_" + username + "_" + m.group(1);
}
} catch (IOException e) {
// Fall back to default album naming convention
}
return super.getAlbumTitle(url);
}
}
@ -1,19 +1,22 @@
package com.rarchives.ripme.ripper.rippers.video;
package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;


import com.rarchives.ripme.ripper.AbstractSingleFileRipper;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.utils.Http;

public class XvideosRipper extends VideoRipper {
public class XvideosRipper extends AbstractSingleFileRipper {

private static final String HOST = "xvideos";

@ -21,11 +24,21 @@ public class XvideosRipper extends VideoRipper {
super(url);
}

@Override
public Document getFirstPage() throws IOException {
return Http.url(this.url).get();
}

@Override
public String getHost() {
return HOST;
}

@Override
public String getDomain() {
return HOST + ".com";
}

@Override
public boolean canRip(URL url) {
Pattern p = Pattern.compile("^https?://[wm.]*xvideos\\.com/video[0-9]+.*$");
@ -33,11 +46,6 @@ public class XvideosRipper extends VideoRipper {
return m.matches();
}

@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://[wm.]*xvideos\\.com/video([0-9]+).*$");
@ -53,9 +61,8 @@ public class XvideosRipper extends VideoRipper {
}

@Override
public void rip() throws IOException {
LOGGER.info(" Retrieving " + this.url);
Document doc = Http.url(this.url).get();
public List<String> getURLsFromPage(Document doc) {
List<String> results = new ArrayList<>();
Elements scripts = doc.select("script");
for (Element e : scripts) {
if (e.html().contains("html5player.setVideoUrlHigh")) {
@ -64,13 +71,16 @@ public class XvideosRipper extends VideoRipper {
for (String line: lines) {
if (line.contains("html5player.setVideoUrlHigh")) {
String videoURL = line.replaceAll("\t", "").replaceAll("html5player.setVideoUrlHigh\\(", "").replaceAll("\'", "").replaceAll("\\);", "");
addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url));
waitForThreads();
return;
results.add(videoURL);
}
}
}
}
throw new IOException("Unable to find video url at " + this.url.toExternalForm());
return results;
}

@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}
@ -49,9 +49,8 @@ public class ZizkiRipper extends AbstractHTMLRipper {
public String getAlbumTitle(URL url) throws MalformedURLException {
try {
// Attempt to use album title as GID
Element titleElement = getFirstPage().select("meta[name=description]").first();
String title = titleElement.attr("content");
title = title.substring(title.lastIndexOf('/') + 1);
Element titleElement = getFirstPage().select("h1.title").first();
String title = titleElement.text();

Element authorSpan = getFirstPage().select("span[class=creator]").first();
String author = authorSpan.select("a").first().text();
@ -1,67 +0,0 @@
package com.rarchives.ripme.ripper.rippers.video;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.utils.Http;

public class VineRipper extends VideoRipper {

private static final String HOST = "vine";

public VineRipper(URL url) throws IOException {
super(url);
}

@Override
public String getHost() {
return HOST;
}

@Override
public boolean canRip(URL url) {
// https://vine.co/v/hiqQrP0eUZx
Pattern p = Pattern.compile("^https?://[wm.]*vine\\.co/v/[a-zA-Z0-9]+.*$");
Matcher m = p.matcher(url.toExternalForm());
return m.matches();
}

@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}

@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https?://[wm.]*vine\\.co/v/([a-zA-Z0-9]+).*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(1);
}

throw new MalformedURLException(
"Expected vine format:"
+ "vine.co/v/####"
+ " Got: " + url);
}

@Override
public void rip() throws IOException {
LOGGER.info(" Retrieving " + this.url.toExternalForm());
Document doc = Http.url(this.url).get();
Elements props = doc.select("meta[property=twitter:player:stream]");
if (props.isEmpty()) {
throw new IOException("Could not find meta property 'twitter:player:stream' at " + url);
}
String vidUrl = props.get(0).attr("content");
addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url));
waitForThreads();
}
}
@ -10,6 +10,7 @@ import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Array;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
@ -142,6 +143,10 @@ public final class MainWindow implements Runnable, RipStatusHandler {

private ResourceBundle rb = Utils.getResourceBundle(null);

// All the languages RipMe has been translated into
private static String[] supportedLanges = new String[] {"en_US", "de_DE", "es_ES", "fr_CH", "kr_KR", "pt_PT",
"fi_FI", "in_ID", "nl_NL", "porrisavvo_FI", "ru_RU"};

private void updateQueueLabel() {
if (queueListModel.size() > 0) {
optionQueue.setText(rb.getString("Queue") + " (" + queueListModel.size() + ")");
@ -496,7 +501,7 @@ public final class MainWindow implements Runnable, RipStatusHandler {
configURLHistoryCheckbox = addNewCheckbox(rb.getString("remember.url.history"), "remember.url_history", true);

configLogLevelCombobox = new JComboBox<>(new String[] {"Log level: Error", "Log level: Warn", "Log level: Info", "Log level: Debug"});
configSelectLangComboBox = new JComboBox<>(new String[] {"en_US", "de_DE", "es_ES", "fr_CH", "kr_KR", "pt_PT", "fi_FI", "in_ID", "porrisavvo_FI"});
configSelectLangComboBox = new JComboBox<>(supportedLanges);
configLogLevelCombobox.setSelectedItem(Utils.getConfigString("log.level", "Log level: Debug"));
setLogLevel(configLogLevelCombobox.getSelectedItem().toString());
configSaveDirLabel = new JLabel();
@ -1206,6 +1211,12 @@ public final class MainWindow implements Runnable, RipStatusHandler {
appendLog("Downloaded " + msg.getObject(), Color.GREEN);
}
break;
case DOWNLOAD_COMPLETE_HISTORY:
if (LOGGER.isEnabledFor(Level.INFO)) {
appendLog("" + msg.getObject(), Color.GREEN);
}
break;

case DOWNLOAD_ERRORED:
if (LOGGER.isEnabledFor(Level.ERROR)) {
appendLog((String) msg.getObject(), Color.RED);
@ -10,6 +10,7 @@ public class RipStatusMessage {
DOWNLOAD_STARTED("Download Started"),
DOWNLOAD_COMPLETE("Download Complete"),
DOWNLOAD_ERRORED("Download Errored"),
DOWNLOAD_COMPLETE_HISTORY("Download Complete History"),
RIP_COMPLETE("Rip Complete"),
DOWNLOAD_WARN("Download problem"),
TOTAL_BYTES("Total bytes"),
@ -20,7 +20,7 @@ import com.rarchives.ripme.utils.Utils;
public class UpdateUtils {

private static final Logger logger = Logger.getLogger(UpdateUtils.class);
private static final String DEFAULT_VERSION = "1.7.51";
private static final String DEFAULT_VERSION = "1.7.60";
private static final String REPO_NAME = "ripmeapp/ripme";
private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
private static final String mainFileName = "ripme.jar";
@ -128,9 +128,9 @@ public class UpdateUtils {
logger.info("Found newer version: " + latestVersion);
int result = JOptionPane.showConfirmDialog(
null,
"<html><font color=\"green\">New version (" + latestVersion + ") is available!</font>"
+ "<br><br>Recent changes:" + changeList
+ "<br><br>Do you want to download and run the newest version?</html>",
String.format("<html><font color=\"green\">New version (%s) is available!</font>"
+ "<br><br>Recent changes: %s"
+ "<br><br>Do you want to download and run the newest version?</html>", latestVersion, changeList.replaceAll("\n", "")),
"RipMe Updater",
JOptionPane.YES_NO_OPTION);
if (result != JOptionPane.YES_OPTION) {
@ -3,9 +3,7 @@ package com.rarchives.ripme.utils;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@ -14,7 +12,7 @@ import com.rarchives.ripme.ripper.rippers.EroShareRipper;
import com.rarchives.ripme.ripper.rippers.EromeRipper;
import com.rarchives.ripme.ripper.rippers.ImgurRipper;
import com.rarchives.ripme.ripper.rippers.VidbleRipper;
import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
import com.rarchives.ripme.ripper.rippers.GfycatRipper;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.log4j.Logger;
import org.jsoup.Jsoup;
@ -56,6 +54,15 @@ public class RipUtils {
} catch (IOException e) {
logger.error("[!] Exception while loading album " + url, e);
}
} else if (url.getHost().endsWith("i.imgur.com") && url.toExternalForm().contains("gifv")) {
// links to imgur gifvs
try {
result.add(new URL(url.toExternalForm().replaceAll(".gifv", ".mp4")));
} catch (IOException e) {
logger.info("Couldn't get gifv from " + url);
}
return result;

}
else if (url.getHost().endsWith("gfycat.com")) {
try {
@ -88,6 +95,9 @@ public class RipUtils {
logger.warn("Exception while retrieving eroshare page:", e);
}
return result;
} else if (url.toExternalForm().contains("v.redd.it")) {
result.add(url);
return result;
}

else if (url.toExternalForm().contains("erome.com")) {
@ -279,4 +289,16 @@ public class RipUtils {
}
return url;
}
/**
* Reads a cookie string (key1=value1;key2=value2) from the config file and turns it into a HashMap
* @return Map of cookies containing session data.
*/
public static Map<String, String> getCookiesFromString(String line) {
Map<String,String> cookies = new HashMap<>();
for (String pair : line.split(";")) {
String[] kv = pair.split("=");
cookies.put(kv[0], kv[1]);
}
return cookies;
}
}
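getCookiesFromString() is the helper that the FuraffinityRipper change imports statically. Usage is a one-liner; the cookie values below are placeholders, not real session tokens:

import java.util.Map;
import static com.rarchives.ripme.utils.RipUtils.getCookiesFromString;

public class CookieParseDemo {
    public static void main(String[] args) {
        // "key1=value1;key2=value2" format, as documented above
        Map<String, String> cookies = getCookiesFromString("a=1111;b=2222");
        System.out.println(cookies.get("a")); // 1111
        System.out.println(cookies.get("b")); // 2222
    }
}

Note the helper assumes every pair contains an "="; a malformed segment would throw ArrayIndexOutOfBoundsException.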
@ -1,87 +1,103 @@
|
||||
package com.rarchives.ripme.utils;
|
||||
|
||||
import java.io.*;
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.net.URISyntaxException;
|
||||
import java.net.URL;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.*;
|
||||
import java.util.jar.JarEntry;
|
||||
import java.util.jar.JarFile;
|
||||
|
||||
import javax.sound.sampled.AudioSystem;
|
||||
import javax.sound.sampled.Clip;
|
||||
import javax.sound.sampled.Line;
|
||||
import javax.sound.sampled.LineEvent;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractRipper;
|
||||
import org.apache.commons.configuration.ConfigurationException;
|
||||
import org.apache.commons.configuration.PropertiesConfiguration;
|
||||
import org.apache.log4j.LogManager;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.log4j.PropertyConfigurator;
|
||||
|
||||
import com.rarchives.ripme.ripper.AbstractRipper;
|
||||
import javax.sound.sampled.AudioSystem;
|
||||
import javax.sound.sampled.Clip;
|
||||
import javax.sound.sampled.Line;
|
||||
import javax.sound.sampled.LineEvent;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.lang.reflect.Array;
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.net.URISyntaxException;
|
||||
import java.net.URL;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.MissingResourceException;
|
||||
import java.util.ResourceBundle;
|
||||
import java.util.jar.JarEntry;
|
||||
import java.util.jar.JarFile;
|
||||
|
||||
/**
 * Common utility functions used in various places throughout the project.
 */
public class Utils {

    private static final String RIP_DIRECTORY = "rips";
    private static final String configFile = "rip.properties";
    private static final String CONFIG_FILE = "rip.properties";
    private static final String OS = System.getProperty("os.name").toLowerCase();
    private static final Logger logger = Logger.getLogger(Utils.class);
    private static final Logger LOGGER = Logger.getLogger(Utils.class);
    private static final int SHORTENED_PATH_LENGTH = 12;

    private static PropertiesConfiguration config;
    private static HashMap<String, HashMap<String, String>> cookieCache;

    static {
        cookieCache = new HashMap<>();

        try {
            String configPath = getConfigFilePath();
            File f = new File(configPath);
            if (!f.exists()) {
            File file = new File(configPath);

            if (!file.exists()) {
                // Use default bundled with .jar
                configPath = configFile;
                configPath = CONFIG_FILE;
            }

            config = new PropertiesConfiguration(configPath);
            logger.info("Loaded " + config.getPath());
            if (f.exists()) {
            LOGGER.info("Loaded " + config.getPath());

            if (file.exists()) {
                // Config was loaded from file
                if ( !config.containsKey("twitter.auth")
                        || !config.containsKey("twitter.max_requests")
                        || !config.containsKey("tumblr.auth")
                        || !config.containsKey("error.skip404")
                        || !config.containsKey("gw.api")
                        || !config.containsKey("page.timeout")
                        || !config.containsKey("download.max_size")
                        ) {
                if (!config.containsKey("twitter.auth") || !config.containsKey("twitter.max_requests")
                        || !config.containsKey("tumblr.auth") || !config.containsKey("error.skip404")
                        || !config.containsKey("gw.api") || !config.containsKey("page.timeout")
                        || !config.containsKey("download.max_size")) {
                    // Config is missing key fields
                    // Need to reload the default config
                    // See https://github.com/4pr0n/ripme/issues/158
                    logger.warn("Config does not contain key fields, deleting old config");
                    f.delete();
                    config = new PropertiesConfiguration(configFile);
                    logger.info("Loaded " + config.getPath());
                    LOGGER.warn("Config does not contain key fields, deleting old config");
                    file.delete();
                    config = new PropertiesConfiguration(CONFIG_FILE);
                    LOGGER.info("Loaded " + config.getPath());
                }
            }
        } catch (Exception e) {
            logger.error("[!] Failed to load properties file from " + configFile, e);
            LOGGER.error("[!] Failed to load properties file from " + CONFIG_FILE, e);
        }
    }

    /**
     * Get the root rips directory.
     * @return
     *      Root directory to save rips to.
     * @throws IOException
     *
     * @return Root directory to save rips to.
     */
    public static File getWorkingDirectory() {
        String currentDir = ".";
        try {
            currentDir = new File(".").getCanonicalPath() + File.separator + RIP_DIRECTORY + File.separator;
        } catch (IOException e) {
            logger.error("Error while finding working dir: ", e);
            LOGGER.error("Error while finding working dir: ", e);
        }

        if (config != null) {
            currentDir = getConfigString("rips.directory", currentDir);
        }

        File workingDir = new File(currentDir);
        if (!workingDir.exists()) {
            workingDir.mkdirs();
@ -100,36 +116,46 @@ public class Utils {
    }

    public static String[] getConfigStringArray(String key) {
        String[] s = config.getStringArray(key);
        if (s.length == 0) {
            return null;
        } else {
            return s;
        }
        String[] configStringArray = config.getStringArray(key);

        return configStringArray.length == 0 ? null : configStringArray;
    }

    public static int getConfigInteger(String key, int defaultValue) {
        return config.getInt(key, defaultValue);
    }

    public static boolean getConfigBoolean(String key, boolean defaultValue) {
        return config.getBoolean(key, defaultValue);
    }

    public static List<String> getConfigList(String key) {
        List<String> result = new ArrayList<>();
        for (Object obj : config.getList(key, new ArrayList<String>())) {
            if (obj instanceof String) {
                result.add( (String) obj);
                result.add((String) obj);
            }
        }
        return result;
    }
    public static void setConfigBoolean(String key, boolean value) { config.setProperty(key, value); }
    public static void setConfigString(String key, String value) { config.setProperty(key, value); }
    public static void setConfigInteger(String key, int value) { config.setProperty(key, value); }

    public static void setConfigBoolean(String key, boolean value) {
        config.setProperty(key, value);
    }

    public static void setConfigString(String key, String value) {
        config.setProperty(key, value);
    }

    public static void setConfigInteger(String key, int value) {
        config.setProperty(key, value);
    }

    public static void setConfigList(String key, List<Object> list) {
        config.clearProperty(key);
        config.addProperty(key, list);
    }

    public static void setConfigList(String key, Enumeration<Object> enumeration) {
        config.clearProperty(key);
        List<Object> list = new ArrayList<>();
@ -142,9 +168,9 @@ public class Utils {
    public static void saveConfig() {
        try {
            config.save(getConfigFilePath());
            logger.info("Saved configuration to " + getConfigFilePath());
            LOGGER.info("Saved configuration to " + getConfigFilePath());
        } catch (ConfigurationException e) {
            logger.error("Error while saving configuration: ", e);
            LOGGER.error("Error while saving configuration: ", e);
        }
    }

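The refactor above leaves the config API as matched getter/setter pairs over PropertiesConfiguration. A minimal sketch of a typical call site (the fallback value is illustrative; "download.max_size" and "error.skip404" are keys listed in the static initializer above):

// Read a setting with a default, change one, and persist to rip.properties.
int maxSize = Utils.getConfigInteger("download.max_size", 1024 * 1024);
Utils.setConfigBoolean("error.skip404", true);
Utils.saveConfig();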
@ -176,7 +202,6 @@ public class Utils {
        return System.getenv("LOCALAPPDATA") + File.separator + "ripme";
    }


    /**
     * Gets the directory of where the config file is stored on a UNIX machine.
     */
@ -197,13 +222,14 @@ public class Utils {
     */
    private static boolean portableMode() {
        try {
            File f = new File(new File(".").getCanonicalPath() + File.separator + configFile);
            if(f.exists() && !f.isDirectory()) {
            File file = new File(new File(".").getCanonicalPath() + File.separator + CONFIG_FILE);
            if (file.exists() && !file.isDirectory()) {
                return true;
            }
        } catch (IOException e) {
            return false;
        }

        return false;
    }

@ -229,6 +255,7 @@ public class Utils {
            return ".";
        }
    }

    /**
     * Delete the url history file
     */
@ -248,26 +275,23 @@ public class Utils {
     * Gets the path to the configuration file.
     */
    private static String getConfigFilePath() {
        return getConfigDir() + File.separator + configFile;
        return getConfigDir() + File.separator + CONFIG_FILE;
    }

    /**
     * Removes the current working directory (CWD) from a File.
     * @param saveAs
     *      The File path
     * @return
     *      saveAs in relation to the CWD
     *
     * @param saveAs The File path
     * @return saveAs in relation to the CWD
     */
    public static String removeCWD(File saveAs) {
        String prettySaveAs = saveAs.toString();
        try {
            prettySaveAs = saveAs.getCanonicalPath();
            String cwd = new File(".").getCanonicalPath() + File.separator;
            prettySaveAs = prettySaveAs.replace(
                cwd,
                "." + File.separator);
            prettySaveAs = prettySaveAs.replace(cwd, "." + File.separator);
        } catch (Exception e) {
            logger.error("Exception: ", e);
            LOGGER.error("Exception: ", e);
        }
        return prettySaveAs;
    }
@ -278,7 +302,6 @@ public class Utils {
     *
     * @param url The URL to filter/strip
     * @param parameter The parameter to strip
     *
     * @return The stripped URL
     */
    public static String stripURLParameter(String url, String parameter) {
@ -290,13 +313,13 @@ public class Utils {
        }

        if (paramIndex > 0) {
            int nextParam = url.indexOf("&", paramIndex+1);
            int nextParam = url.indexOf('&', paramIndex + 1);
            if (nextParam != -1) {
                String c = "&";
                if (wasFirstParam) {
                    c = "?";
                }
                url = url.substring(0, paramIndex) + c + url.substring(nextParam+1, url.length());
                url = url.substring(0, paramIndex) + c + url.substring(nextParam + 1, url.length());
            } else {
                url = url.substring(0, paramIndex);
            }
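The expected behavior of stripURLParameter is pinned down by the UtilsTest added further below; a one-line usage sketch:

String clean = Utils.stripURLParameter("http://example.tld/image.ext?param", "param");
// clean -> "http://example.tld/image.ext"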
@ -307,10 +330,9 @@ public class Utils {

    /**
     * Removes the current working directory from a given filename
     * @param file
     *      Path to the file
     * @return
     *      'file' without the leading current working directory
     *
     * @param file Path to the file
     * @return 'file' without the leading current working directory
     */
    public static String removeCWD(String file) {
        return removeCWD(new File(file));
@ -320,12 +342,11 @@ public class Utils {
     * Get a list of all Classes within a package.
     * Works with file system projects and jar files!
     * Borrowed from StackOverflow, but I don't have a link :[
     * @param pkgname
     *      The name of the package
     * @return
     *      List of classes within the package
     *
     * @param pkgname The name of the package
     * @return List of classes within the package
     */
    public static ArrayList<Class<?>> getClassesForPackage(String pkgname) {
    public static List<Class<?>> getClassesForPackage(String pkgname) {
        ArrayList<Class<?>> classes = new ArrayList<>();
        String relPath = pkgname.replace('.', '/');
        URL resource = ClassLoader.getSystemClassLoader().getResource(relPath);
@ -334,7 +355,8 @@ public class Utils {
        }

        String fullPath = resource.getFile();
        File directory = null;
        File directory;

        try {
            directory = new File(resource.toURI());
        } catch (URISyntaxException e) {
@ -356,8 +378,7 @@ public class Utils {
                }
            }
        }
        }
        else {
        } else {
            // Load from JAR
            try {
                String jarPath = fullPath
@ -376,7 +397,7 @@ public class Utils {
                    try {
                        classes.add(Class.forName(className));
                    } catch (ClassNotFoundException e) {
                        logger.error("ClassNotFoundException loading " + className);
                        LOGGER.error("ClassNotFoundException loading " + className);
                        jarFile.close(); // Resource leak fix?
                        throw new RuntimeException("ClassNotFoundException loading " + className);
                    }
@ -384,20 +405,18 @@ public class Utils {
                }
                jarFile.close(); // Eclipse said not closing it would have a resource leak
            } catch (IOException e) {
                logger.error("Error while loading jar file:", e);
                LOGGER.error("Error while loading jar file:", e);
                throw new RuntimeException(pkgname + " (" + directory + ") does not appear to be a valid package", e);
            }
        }
        return classes;
    }

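A sketch of how the reflection helper is consumed (the package name comes from the getListOfAlbumRippers javadoc further below; the printout is illustrative):

List<Class<?>> rippers = Utils.getClassesForPackage("com.rarchives.ripme.ripper.rippers");
for (Class<?> ripperClass : rippers) {
    System.out.println(ripperClass.getSimpleName()); // one line per discovered ripper
}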
    private static final int SHORTENED_PATH_LENGTH = 12;
    /**
     * Shortens the path to a file
     * @param path
     *      String of the path to the file
     * @return
     *      The simplified path to the file.
     *
     * @param path String of the path to the file
     * @return The simplified path to the file.
     */
    public static String shortenPath(String path) {
        return shortenPath(new File(path));
@ -405,10 +424,9 @@ public class Utils {

    /**
     * Shortens the path to a file
     * @param file
     *      File object that you want the shortened path of.
     * @return
     *      The simplified path to the file.
     *
     * @param file File object that you want the shortened path of.
     * @return The simplified path to the file.
     */
    public static String shortenPath(File file) {
        String path = removeCWD(file);
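The tail of shortenPath(File) falls outside this hunk; the following standalone sketch reproduces the behavior implied by SHORTENED_PATH_LENGTH = 12 and the testShortenPath expectations below (the exact length threshold is a guess, not the commit's code):

static String shortenSketch(String path) {
    path = new java.io.File(path).toString(); // File drops a trailing separator
    if (path.length() <= 24) { // 2 * SHORTENED_PATH_LENGTH; exact threshold is a guess
        return path;
    }
    return path.substring(0, 12) + "..." + path.substring(path.length() - 12);
}
// shortenSketch("/test/test/test/test/test/test/test/test/")
//   -> "/test/test/t...st/test/test"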
@ -422,10 +440,9 @@ public class Utils {

    /**
     * Sanitizes a string so that a filesystem can handle it
     * @param text
     *      The text to be sanitized.
     * @return
     *      The sanitized text.
     *
     * @param text The text to be sanitized.
     * @return The sanitized text.
     */
    public static String filesystemSanitized(String text) {
        text = text.replaceAll("[^a-zA-Z0-9.-]", "_");
@ -451,7 +468,7 @@ public class Utils {
    public static String getOriginalDirectory(String path) {

        int index;
        if(isUnix() || isMacOS()) {
        if (isUnix() || isMacOS()) {
            index = path.lastIndexOf('/');
        } else {
            // current OS is windows - nothing to do here
@ -459,17 +476,17 @@ public class Utils {
        }

        String original = path; // needs to be checked if lowercase exists
        String lastPart = original.substring(index+1).toLowerCase(); // setting lowercase to check if it exists
        String lastPart = original.substring(index + 1).toLowerCase(); // setting lowercase to check if it exists

        // Get a List of all Directories and check its lowercase
        // if file exists return it
        File f = new File(path.substring(0, index));
        ArrayList<String> names = new ArrayList<String>(Arrays.asList(f.list()));
        File file = new File(path.substring(0, index));
        ArrayList<String> names = new ArrayList<>(Arrays.asList(file.list()));

        for (String s : names) {
            if(s.toLowerCase().equals(lastPart)) {
        for (String name : names) {
            if (name.toLowerCase().equals(lastPart)) {
                // Building Path of existing file
                return path.substring(0, index) + File.separator + s;
                return path.substring(0, index) + File.separator + name;
            }
        }

@ -478,14 +495,13 @@ public class Utils {

    /**
     * Converts an integer into a human readable string
     * @param bytes
     *      Non-human readable integer.
     * @return
     *      Human readable interpretation of a byte.
     *
     * @param bytes Non-human readable integer.
     * @return Human readable interpretation of a byte.
     */
    public static String bytesToHumanReadable(int bytes) {
        float fbytes = (float) bytes;
        String[] mags = new String[] {"", "K", "M", "G", "T"};
        String[] mags = new String[]{"", "K", "M", "G", "T"};
        int magIndex = 0;
        while (fbytes >= 1024) {
            fbytes /= 1024;
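A standalone sketch of the arithmetic (the "%.2f%siB" format string is inferred from the UtilsTest expectations below; it is not shown in this hunk):

static String humanReadableSketch(int bytes) {
    float fbytes = (float) bytes;
    String[] mags = new String[]{"", "K", "M", "G", "T"};
    int magIndex = 0;
    while (fbytes >= 1024) {
        fbytes /= 1024;   // divide down until under one unit of the next magnitude
        magIndex++;
    }
    return String.format("%.2f%siB", fbytes, mags[magIndex]);
}
// humanReadableSketch(10)   -> "10.00iB"
// humanReadableSketch(1536) -> "1.50KiB"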
@ -496,6 +512,7 @@ public class Utils {

    /**
     * Gets and returns a list of all the album rippers present in the "com.rarchives.ripme.ripper.rippers" package.
     *
     * @return List<String> of all album rippers present.
     */
    public static List<String> getListOfAlbumRippers() throws Exception {
@ -508,6 +525,7 @@ public class Utils {

    /**
     * Gets and returns a list of all video rippers present in the "com.rarchives.ripme.ripper.rippers.video" package
     *
     * @return List<String> of all the video rippers.
     */
    public static List<String> getListOfVideoRippers() throws Exception {
@ -520,8 +538,8 @@ public class Utils {

    /**
     * Plays a sound from a file.
     * @param filename
     *      Path to the sound file
     *
     * @param filename Path to the sound file
     */
    public static void playSound(String filename) {
        URL resource = ClassLoader.getSystemClassLoader().getResource(filename);
@ -535,7 +553,7 @@ public class Utils {
            clip.open(AudioSystem.getAudioInputStream(resource));
            clip.start();
        } catch (Exception e) {
            logger.error("Failed to play sound " + filename, e);
            LOGGER.error("Failed to play sound " + filename, e);
        }
    }

@ -544,27 +562,25 @@ public class Utils {
     */
    public static void configureLogger() {
        LogManager.shutdown();
        String logFile;
        if (getConfigBoolean("log.save", false)) {
            logFile = "log4j.file.properties";
        }
        else {
            logFile = "log4j.properties";
        }
        InputStream stream = Utils.class.getClassLoader().getResourceAsStream(logFile);
        String logFile = getConfigBoolean("log.save", false) ? "log4j.file.properties" : "log4j.properties";

        try (InputStream stream = Utils.class.getClassLoader().getResourceAsStream(logFile)) {
            if (stream == null) {
                PropertyConfigurator.configure("src/main/resources/" + logFile);
            } else {
                PropertyConfigurator.configure(stream);
            }
            logger.info("Loaded " + logFile);
            try {
                stream.close();
            } catch (IOException e) { }

            LOGGER.info("Loaded " + logFile);
        } catch (IOException e) {
            LOGGER.error(e.getMessage(), e);
        }

    }

    /**
     * Gets list of strings between two strings.
     *
     * @param fullText Text to retrieve from.
     * @param start String that precedes the desired text
     * @param finish String that follows the desired text
@ -572,29 +588,29 @@ public class Utils {
     */
    public static List<String> between(String fullText, String start, String finish) {
        List<String> result = new ArrayList<>();
        int i, j;
        i = fullText.indexOf(start);
        int i = fullText.indexOf(start);

        while (i >= 0) {
            i += start.length();
            j = fullText.indexOf(finish, i);
            int j = fullText.indexOf(finish, i);
            if (j < 0) {
                break;
            }
            result.add(fullText.substring(i, j));
            i = fullText.indexOf(start, j + finish.length());
        }

        return result;
    }

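A usage sketch for between (the markup string is illustrative):

// Extract every href value from a fragment of HTML-ish text.
List<String> hrefs = Utils.between("<a href=\"a.png\"><a href=\"b.png\">", "href=\"", "\"");
// hrefs -> ["a.png", "b.png"]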
    /**
     * Parses a URL query
     *
     * @param query
     *      The query part of a URL
     * @param query The query part of a URL
     * @return The map of all query parameters
     */
    public static Map<String,String> parseUrlQuery(String query) {
        Map<String,String> res = new HashMap<>();
    public static Map<String, String> parseUrlQuery(String query) {
        Map<String, String> res = new HashMap<>();

        if (query.equals("")) {
            return res;
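The body of the one-argument overload is elided by this hunk, so the following shows only the behavior its javadoc implies (the query string is illustrative):

Map<String, String> params = Utils.parseUrlQuery("id=578682&page=2");
// params -> {"id": "578682", "page": "2"}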
@ -622,10 +638,8 @@ public class Utils {
    /**
     * Parses a URL query and returns the requested parameter's value
     *
     * @param query
     *      The query part of a URL
     * @param key
     *      The key whose value is requested
     * @param query The query part of a URL
     * @param key The key whose value is requested
     * @return The associated value or null if key wasn't found
     */
    public static String parseUrlQuery(String query, String key) {
@ -655,18 +669,13 @@ public class Utils {
        return null;
    }

    private static HashMap<String, HashMap<String, String>> cookieCache;
    static {
        cookieCache = new HashMap<String, HashMap<String, String>>();
    }

    /**
     * Gets all the cookies from a certain host
     */
    public static Map<String, String> getCookies(String host) {
        HashMap<String, String> domainCookies = cookieCache.get(host);
        if (domainCookies == null) {
            domainCookies = new HashMap<String, String>();
            domainCookies = new HashMap<>();
            String cookiesConfig = getConfigString("cookies." + host, "");
            for (String pair : cookiesConfig.split(" ")) {
                pair = pair.trim();
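The per-pair parsing is cut off by the hunk boundary, but the split(" ") above implies a space-separated list of pairs in the config. A hedged sketch (the host and cookie values are illustrative):

// Hypothetical rip.properties entry, inferred from the loop above:
//   cookies.example.com = sessionid=abc123 csrftoken=xyz789
Map<String, String> cookies = Utils.getCookies("example.com");
// expected: {"sessionid": "abc123", "csrftoken": "xyz789"}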
@ -690,20 +699,68 @@ public class Utils {
        if (langSelect == null) {
            if (!getConfigString("lang", "").equals("")) {
                String[] langCode = getConfigString("lang", "").split("_");
                logger.info("Setting locale to " + getConfigString("lang", ""));
                LOGGER.info("Setting locale to " + getConfigString("lang", ""));
                return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control());
            }
        } else {
            String[] langCode = langSelect.split("_");
            logger.info("Setting locale to " + langSelect);
            LOGGER.info("Setting locale to " + langSelect);
            return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control());
        }
        try {
            logger.info("Setting locale to default");
            LOGGER.info("Setting locale to default");
            return ResourceBundle.getBundle("LabelsBundle", Locale.getDefault(), new UTF8Control());
        } catch (MissingResourceException e) {
            logger.info("Setting locale to root");
            LOGGER.info("Setting locale to root");
            return ResourceBundle.getBundle("LabelsBundle", Locale.ROOT);
        }
    }

    /**
     * Formats and returns the status text for rippers using the byte progress bar
     *
     * @param completionPercentage An int between 0 and 100 which represents how close the download is to complete
     * @param bytesCompleted How many bytes have been downloaded
     * @param bytesTotal The total size of the file that is being downloaded
     * @return Returns the formatted status text for rippers using the byte progress bar
     */
    public static String getByteStatusText(int completionPercentage, int bytesCompleted, int bytesTotal) {
        return String.valueOf(completionPercentage) +
                "% - " +
                Utils.bytesToHumanReadable(bytesCompleted) +
                " / " +
                Utils.bytesToHumanReadable(bytesTotal);
    }

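As a worked example of the concatenation above, the UtilsTest added below pins getByteStatusText(5, 500, 100000) to "5% - 500.00iB / 97.66KiB": the percentage, then both byte counts run through bytesToHumanReadable.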
    public static String getEXTFromMagic(byte[] magic) {
        if (Arrays.equals(magic, new byte[]{-1, -40, -1, -37, 0, 0, 0, 0})) {
            return "jpeg";
        } else {
            LOGGER.info("Unknown magic number " + Arrays.toString(magic));
        }
        return null;
    }

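A hedged sketch of feeding getEXTFromMagic from disk (the helper and file path are illustrative; only the JPEG signature FF D8 FF DB, i.e. {-1, -40, -1, -37}, is recognized above):

static String sniffExt(java.io.File f) throws java.io.IOException {
    byte[] magic = new byte[8]; // zero-padded, matching the array compared above
    try (java.io.FileInputStream in = new java.io.FileInputStream(f)) {
        in.read(magic); // a short read just leaves trailing zeros
    }
    return Utils.getEXTFromMagic(magic); // "jpeg" for a JPEG header, otherwise null
}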
    // Checks if a file exists ignoring its extension.
    // Code from: https://stackoverflow.com/a/17698068
    public static boolean fuzzyExists(File folder, String fileName) {
        if (!folder.exists()) {
            return false;
        }
        File[] listOfFiles = folder.listFiles();
        if (listOfFiles == null) {
            return false;
        }

        for (File file : listOfFiles) {
            if (file.isFile()) {
                String[] filename = file.getName().split("\\.(?=[^\\.]+$)"); // split filename from its extension
                if (filename[0].equalsIgnoreCase(fileName)) {
                    return true;
                }
            }
        }
        return false;
    }

}
37
src/main/resources/LabelsBundle_it_IT.properties
Normal file
@ -0,0 +1,37 @@
Log = Log
History = Cronologia
created = creato
modified = modificato
Queue = Coda
Configuration = Configurazione

# Keys for the Configuration menu

current.version = Versione Corrente
check.for.updates = Controlla Aggiornamenti
auto.update = Aggiornamento automatico?
max.download.threads = Thread massimi
timeout.mill = Timeout (in millisecondi):
retry.download.count = Tentativi di download
overwrite.existing.files = Sovrascrivi file esistenti?
sound.when.rip.completes = Suono al termine
preserve.order = Conserva ordine
save.logs = Salva log
notification.when.rip.starts = Notifica inizio
save.urls.only = Salva solo URL
save.album.titles = Salva titoli album
autorip.from.clipboard = Scarica da appunti
save.descriptions = Salva descrizioni
prefer.mp4.over.gif = Preferisci MP4 a GIF
restore.window.position = Ripristina posizione della finestra
remember.url.history = Ricorda la cronologia degli URL
loading.history.from = Carica cronologia da

# Misc UI keys

loading.history.from.configuration = Caricamento cronologia da configurazione
interrupted.while.waiting.to.rip.next.album = Interrotto mentre scaricavo album successivo
inactive = Inattivo
re-rip.checked = Re-rip selezionato
remove = Rimuovi
clear = Pulisci
37
src/main/resources/LabelsBundle_nl_NL.properties
Normal file
@ -0,0 +1,37 @@
Log = Logboek
History = Geschiedenis
created = gemaakt
modified = aangepast
Queue = Wachtrij
Configuration = Configuratie

# Keys for the Configuration menu

current.version = Huidige versie
check.for.updates = Controleer op updates
auto.update = Auto-update?
max.download.threads = Maximale downloadthreads
timeout.mill = Timeout (in milliseconden):
retry.download.count = Aantal keren opnieuw proberen te downloaden
overwrite.existing.files = Bestaande bestanden overschrijven?
sound.when.rip.completes = Geluid wanneer rip klaar is
preserve.order = Volgorde behouden
save.logs = Logbestanden opslaan
notification.when.rip.starts = Notificatie wanneer rip start
save.urls.only = Alleen URLs opslaan
save.album.titles = Album titels opslaan
autorip.from.clipboard = Rip automatisch van klembord
save.descriptions = Beschrijvingen opslaan
prefer.mp4.over.gif = Geef de voorkeur aan MP4 over GIF
restore.window.position = Vensterpositie herstellen
remember.url.history = URL geschiedenis onthouden
loading.history.from = Geschiedenis laden van

# Misc UI keys

loading.history.from.configuration = Geschiedenis laden van configuratie
interrupted.while.waiting.to.rip.next.album = Onderbroken tijdens het wachten om volgend album te rippen
inactive = Inactief
re-rip.checked = Re-rip Gecheckt
remove = Verwijderen
clear = Opruimen
37
src/main/resources/LabelsBundle_ru_RU.properties
Normal file
@ -0,0 +1,37 @@
Log = Лог
History = История
created = создано
modified = изменено
Queue = Очередь
Configuration = Настройки

# Keys for the Configuration menu

current.version = Текущая версия
check.for.updates = Проверить обновления
auto.update = Автообновление?
max.download.threads = Максимальное число потоков:
timeout.mill = Задержка (в миллисекундах):
retry.download.count = Число повторов
overwrite.existing.files = Перезаписать существующие файлы?
sound.when.rip.completes = Звук при завершении
preserve.order = Сохранять порядок
save.logs = Сохранять логи
notification.when.rip.starts = Уведомление при запуске
save.urls.only = Сохранять только ссылки
save.album.titles = Сохранять названия альбомов
autorip.from.clipboard = Автоскачивание из буфера
save.descriptions = Сохранять описания
prefer.mp4.over.gif = Предпочесть MP4 вместо GIF
restore.window.position = Восстановить положение окна
remember.url.history = Запоминать историю запросов
loading.history.from = Загрузить историю из

# Misc UI keys

loading.history.from.configuration = Загрузить историю из настроек
interrupted.while.waiting.to.rip.next.album = Прервано во время ожидания скачивания следующего альбома
inactive = Неактивно
re-rip.checked = Перекачать выбранное
remove = Удалить
clear = Очистить
45
src/test/java/com/rarchives/ripme/tst/UtilsTest.java
Normal file
@ -0,0 +1,45 @@
package com.rarchives.ripme.tst;

import junit.framework.TestCase;
import com.rarchives.ripme.utils.Utils;

import java.util.ArrayList;
import java.util.Arrays;

public class UtilsTest extends TestCase {

    public void testGetEXTFromMagic() {
        assertEquals("jpeg", Utils.getEXTFromMagic(new byte[]{-1, -40, -1, -37, 0, 0, 0, 0}));
    }

    public void testStripURLParameter() {
        assertEquals("http://example.tld/image.ext",
                Utils.stripURLParameter("http://example.tld/image.ext?param", "param"));
    }

    public void testShortenPath() {
        String path = "/test/test/test/test/test/test/test/test/";
        assertEquals("/test/test1", Utils.shortenPath("/test/test1"));
        assertEquals("/test/test/t...st/test/test", Utils.shortenPath(path));
    }

    public void testBytesToHumanReadable() {
        assertEquals("10.00iB", Utils.bytesToHumanReadable(10));
        assertEquals("1.00KiB", Utils.bytesToHumanReadable(1024));
        assertEquals("1.00MiB", Utils.bytesToHumanReadable(1024 * 1024));
        assertEquals("1.00GiB", Utils.bytesToHumanReadable(1024 * 1024 * 1024));
    }

    public void testGetListOfAlbumRippers() throws Exception {
        assert(!Utils.getListOfAlbumRippers().isEmpty());
    }

    public void testGetByteStatusText() {
        assertEquals("5% - 500.00iB / 97.66KiB", Utils.getByteStatusText(5, 500, 100000));
    }

    public void testBetween() {
        assertEquals(Arrays.asList(" is a "), Utils.between("This is a test", "This", "test"));
    }

}
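With UtilsTest in place, these helpers can be exercised in isolation through Maven Surefire, e.g. mvn test -Dtest=UtilsTest.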
@ -0,0 +1,33 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;

import com.rarchives.ripme.ripper.rippers.ArtStationRipper;

public class ArtStationRipperTest extends RippersTest {

    public void testArtStationProjects() throws IOException {
        List<URL> contentURLs = new ArrayList<>();
        contentURLs.add(new URL("https://www.artstation.com/artwork/the-dwarf-mortar"));
        contentURLs.add(new URL("https://www.artstation.com/artwork/K36GR"));
        contentURLs.add(new URL("http://artstation.com/artwork/5JJQw"));
        for (URL url : contentURLs) {
            ArtStationRipper ripper = new ArtStationRipper(url);
            testRipper(ripper);
        }
    }

    public void testArtStationUserProfiles() throws IOException {
        List<URL> contentURLs = new ArrayList<>();
        contentURLs.add(new URL("https://www.artstation.com/heitoramatsu"));
        contentURLs.add(new URL("https://artstation.com/kuvshinov_ilya"));
        contentURLs.add(new URL("http://artstation.com/givemeapiggy"));
        for (URL url : contentURLs) {
            ArtStationRipper ripper = new ArtStationRipper(url);
            testRipper(ripper);
        }
    }
}
@ -6,8 +6,9 @@ import java.net.URL;
import com.rarchives.ripme.ripper.rippers.BcfakesRipper;

public class BcfakesRipperTest extends RippersTest {
    public void testRip() throws IOException {
        BcfakesRipper ripper = new BcfakesRipper(new URL("http://www.bcfakes.com/celebritylist/olivia-wilde/"));
        testRipper(ripper);
    }
    // 21/06/2018 This test was disabled as the site has experienced notable downtime
    // public void testRip() throws IOException {
    //     BcfakesRipper ripper = new BcfakesRipper(new URL("http://www.bcfakes.com/celebritylist/olivia-wilde/"));
    //     testRipper(ripper);
    // }
}
@ -10,4 +10,9 @@ public class CheveretoRipperTest extends RippersTest {
        CheveretoRipper ripper = new CheveretoRipper(new URL("http://tag-fox.com/album/Thjb"));
        testRipper(ripper);
    }

    public void testSubdirAlbum() throws IOException {
        CheveretoRipper ripper = new CheveretoRipper(new URL("https://kenzato.uk/booru/album/TnEc"));
        testRipper(ripper);
    }
}
@ -2,24 +2,48 @@ package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;

import com.rarchives.ripme.ripper.rippers.DeviantartRipper;
import com.rarchives.ripme.utils.Http;
import org.jsoup.nodes.Document;

public class DeviantartRipperTest extends RippersTest {
    public void testDeviantartAlbum() throws IOException {
        DeviantartRipper ripper = new DeviantartRipper(new URL("http://airgee.deviantart.com/gallery/"));
        DeviantartRipper ripper = new DeviantartRipper(new URL("https://www.deviantart.com/airgee/gallery/"));
        testRipper(ripper);
    }

    public void testDeviantartNSFWAlbum() throws IOException {
        // NSFW gallery
        DeviantartRipper ripper = new DeviantartRipper(new URL("http://faterkcx.deviantart.com/gallery/"));
        DeviantartRipper ripper = new DeviantartRipper(new URL("https://www.deviantart.com/faterkcx/gallery/"));
        testRipper(ripper);
    }

    public void testGetGID() throws IOException {
        URL url = new URL("http://airgee.deviantart.com/gallery/");
        URL url = new URL("https://www.deviantart.com/airgee/gallery/");
        DeviantartRipper ripper = new DeviantartRipper(url);
        assertEquals("airgee", ripper.getGID(url));
    }

    public void testGetGalleryIDAndUsername() throws IOException {
        URL url = new URL("https://www.deviantart.com/airgee/gallery/");
        DeviantartRipper ripper = new DeviantartRipper(url);
        Document doc = Http.url(url).get();
        assertEquals("airgee", ripper.getUsername(doc));
        assertEquals("714589", ripper.getGalleryID(doc));
    }

    public void testSanitizeURL() throws IOException {
        List<URL> urls = new ArrayList<URL>();
        urls.add(new URL("https://www.deviantart.com/airgee/"));
        urls.add(new URL("https://www.deviantart.com/airgee"));
        urls.add(new URL("https://www.deviantart.com/airgee/gallery/"));

        for (URL url : urls) {
            DeviantartRipper ripper = new DeviantartRipper(url);
            assertEquals("https://www.deviantart.com/airgee/gallery/", ripper.sanitizeURL(url).toExternalForm());
        }
    }
}
@ -0,0 +1,40 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.EromeRipper;

public class EromeRipperTest extends RippersTest {

    public void testGetGIDProfilePage() throws IOException {
        URL url = new URL("https://www.erome.com/Jay-Jenna");
        EromeRipper ripper = new EromeRipper(url);
        assertEquals("Jay-Jenna", ripper.getGID(url));
    }

    public void testGetGIDAlbum() throws IOException {
        URL url = new URL("https://www.erome.com/a/KbDAM1XT");
        EromeRipper ripper = new EromeRipper(url);
        assertEquals("KbDAM1XT", ripper.getGID(url));
    }

    public void testGetAlbumsToQueue() throws IOException {
        URL url = new URL("https://www.erome.com/Jay-Jenna");
        EromeRipper ripper = new EromeRipper(url);
        assert(2 >= ripper.getAlbumsToQueue(ripper.getFirstPage()).size());
    }

    public void testPageContainsAlbums() throws IOException {
        URL url = new URL("https://www.erome.com/Jay-Jenna");
        EromeRipper ripper = new EromeRipper(url);
        assert(ripper.pageContainsAlbums(url));
        assert(!ripper.pageContainsAlbums(new URL("https://www.erome.com/a/KbDAM1XT")));
    }

    public void testRip() throws IOException {
        URL url = new URL("https://www.erome.com/a/4FqeUxil");
        EromeRipper ripper = new EromeRipper(url);
        testRipper(ripper);
    }
}
@ -16,4 +16,12 @@ public class FuraffinityRipperTest extends RippersTest {
        FuraffinityRipper ripper = new FuraffinityRipper(url);
        assertEquals("mustardgas", ripper.getGID(url));
    }

    public void testLogin() throws IOException {
        URL url = new URL("https://www.furaffinity.net/gallery/mustardgas/");
        FuraffinityRipper ripper = new FuraffinityRipper(url);
        // Check if the first page contains the username of ripme's shared account
        Boolean containsUsername = ripper.getFirstPage().html().contains("ripmethrowaway");
        assert containsUsername;
    }
}
@ -1,6 +1,6 @@
package com.rarchives.ripme.tst.ripper.rippers;

import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
import com.rarchives.ripme.ripper.rippers.GfycatRipper;
import java.io.IOException;
import java.net.URL;

@ -0,0 +1,13 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.ImagearnRipper;

public class ImagearnRipperTest extends RippersTest {
    public void testImagearnRip() throws IOException {
        ImagearnRipper ripper = new ImagearnRipper(new URL("http://imagearn.com//gallery.php?id=578682"));
        testRipper(ripper);
    }
}
@ -0,0 +1,10 @@
package com.rarchives.ripme.tst.ripper.rippers;

import com.rarchives.ripme.ripper.rippers.LoveromRipper;

import java.io.IOException;
import java.net.URL;

public class LoveromRipperTest extends RippersTest {

}
@ -7,7 +7,13 @@ import com.rarchives.ripme.ripper.rippers.ManganeloRipper;

public class ManganeloRipperTest extends RippersTest {
    public void testRip() throws IOException {
        ManganeloRipper ripper = new ManganeloRipper(new URL("http://manganelo.com/manga/black_clover"));
        ManganeloRipper ripper = new ManganeloRipper(new URL("https://manganelo.com/manga/demonic_housekeeper"));
        testRipper(ripper);
    }

    public void testGetGID() throws IOException {
        URL url = new URL("https://manganelo.com/manga/demonic_housekeeper");
        ManganeloRipper ripper = new ManganeloRipper(url);
        assertEquals("demonic_housekeeper", ripper.getGID(url));
    }
}
@ -21,4 +21,18 @@ public class MyhentaicomicsRipperTest extends RippersTest {
        // Test a tag
        assertEquals("2409", ripper.getGID(new URL("http://myhentaicomics.com/index.php/tag/2409/")));
    }

    public void testGetAlbumsToQueue() throws IOException {
        URL url = new URL("https://myhentaicomics.com/index.php/tag/3167/");
        MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url);
        assertEquals(15, ripper.getAlbumsToQueue(ripper.getFirstPage()).size());
    }

    public void testPageContainsAlbums() throws IOException {
        URL url = new URL("https://myhentaicomics.com/index.php/tag/3167/");
        URL url2 = new URL("https://myhentaicomics.com/index.php/search?q=test");
        MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url);
        assertTrue(ripper.pageContainsAlbums(url));
        assertTrue(ripper.pageContainsAlbums(url2));
    }
}
@ -0,0 +1,19 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.PicstatioRipper;

public class PicstatioRipperTest extends RippersTest {

    public void testRip() throws IOException {
        PicstatioRipper ripper = new PicstatioRipper(new URL("https://www.picstatio.com/aerial-view-wallpapers"));
        testRipper(ripper);
    }

    public void testGID() throws IOException {
        PicstatioRipper ripper = new PicstatioRipper(new URL("https://www.picstatio.com/aerial-view-wallpapers"));
        assertEquals("aerial-view-wallpapers", ripper.getGID(new URL("https://www.picstatio.com/aerial-view-wallpapers")));
    }
}
@ -5,7 +5,6 @@ import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.RedditRipper;
import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;

public class RedditRipperTest extends RippersTest {
    // https://github.com/RipMeApp/ripme/issues/253 - Disabled tests: RedditRipperTest#testRedditSubreddit*Rip is flaky

@ -7,10 +7,8 @@ import java.util.List;

import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.ripper.rippers.video.PornhubRipper;
import com.rarchives.ripme.ripper.rippers.video.TwitchVideoRipper;
import com.rarchives.ripme.ripper.rippers.video.VineRipper;
import com.rarchives.ripme.ripper.rippers.video.XhamsterRipper;
import com.rarchives.ripme.ripper.rippers.video.XvideosRipper;
import com.rarchives.ripme.ripper.rippers.XvideosRipper;
import com.rarchives.ripme.ripper.rippers.video.YoupornRipper;
import com.rarchives.ripme.ripper.rippers.video.YuvutuRipper;

@ -59,16 +57,6 @@ public class VideoRippersTest extends RippersTest {
        }
    }

    public void testXvideosRipper() throws IOException {
        List<URL> contentURLs = new ArrayList<>();
        contentURLs.add(new URL("https://www.xvideos.com/video19719109/ziggy_star_ultra_hard_anal_pounding"));
        contentURLs.add(new URL("https://www.xvideos.com/video23515878/dee_s_pool_toys"));
        for (URL url : contentURLs) {
            XvideosRipper ripper = new XvideosRipper(url);
            videoTestHelper(ripper);
        }
    }

    public void testPornhubRipper() throws IOException {
        List<URL> contentURLs = new ArrayList<>();
        contentURLs.add(new URL("https://www.pornhub.com/view_video.php?viewkey=ph5a329fa707269"));
@ -78,18 +66,6 @@ public class VideoRippersTest extends RippersTest {
        }
    }

    // https://github.com/RipMeApp/ripme/issues/186
    /*
    public void testVineRipper() throws IOException {
        List<URL> contentURLs = new ArrayList<>();
        contentURLs.add(new URL("https://vine.co/v/hiqQrP0eUZx"));
        for (URL url : contentURLs) {
            VineRipper ripper = new VineRipper(url);
            videoTestHelper(ripper);
        }
    }
    */

    public void testYoupornRipper() throws IOException {
        List<URL> contentURLs = new ArrayList<>();
        contentURLs.add(new URL("http://www.youporn.com/watch/7669155/mrs-li-amateur-69-orgasm/?from=categ"));
@ -17,6 +17,11 @@ public class XhamsterRipperTest extends RippersTest {
        testRipper(ripper);
    }

    public void testBrazilianXhamster() throws IOException {
        XhamsterRipper ripper = new XhamsterRipper(new URL("https://pt.xhamster.com/photos/gallery/silvana-7105696"));
        testRipper(ripper);
    }

    public void testGetGID() throws IOException {
        URL url = new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664");
        XhamsterRipper ripper = new XhamsterRipper(url);
@ -0,0 +1,16 @@
package com.rarchives.ripme.tst.ripper.rippers;

import java.io.IOException;
import java.net.URL;

import com.rarchives.ripme.ripper.rippers.XvideosRipper;
import com.rarchives.ripme.tst.ripper.rippers.RippersTest;

public class XvideosRipperTest extends RippersTest {

    public void testXvideosAlbum1() throws IOException {
        XvideosRipper ripper = new XvideosRipper(new URL("https://www.xvideos.com/video23515878/dee_s_pool_toys"));
        testRipper(ripper);
    }

}