Merge pull request #3 from RipMeApp/master

Update from original
rephormat 2018-02-06 20:17:13 -06:00 committed by GitHub
commit 3a098109b1
11 changed files with 349 additions and 21 deletions

View File

@@ -4,7 +4,7 @@
<groupId>com.rarchives.ripme</groupId>
<artifactId>ripme</artifactId>
<packaging>jar</packaging>
-<version>1.7.13</version>
+<version>1.7.14</version>
<name>ripme</name>
<url>http://rip.rarchives.com</url>
<properties>

View File

@@ -1,6 +1,7 @@
{
-"latestVersion": "1.7.13",
+"latestVersion": "1.7.14",
"changeList": [
+"1.7.14: Tumblr API Key Choosing Fix; Make webtoons ripper download maximum quality images; Added twitch ripper; Added VSCO ripper; Fixed pornhub video ripper",
"1.7.13: disabled FuskatorRipperTest; Fixes xhamster.com video ripper; Add yuvutu.com ripper",
"1.7.12: Instagram ripper no longer 403s on certain images",
"1.7.11: Added gwarchives support to the cheveretoRipper; Gfycat Tests & Fix for bad reddit submissions; instagram ripper can now be made to skip videos",

View File

@@ -34,23 +34,30 @@ public class TumblrRipper extends AlbumRipper {
private ALBUM_TYPE albumType;
private String subdomain, tagName, postNumber;
-private static String TUMBLR_AUTH_CONFIG_KEY = "tumblr.auth";
+private static final String TUMBLR_AUTH_CONFIG_KEY = "tumblr.auth";
private static boolean useDefaultApiKey = false; // fall-back for bad user-specified key
-private static final List<String> apiKeys = Arrays.asList("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX",
-"FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4",
-"qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz");
-private static final String API_KEY = apiKeys.get(new Random().nextInt(apiKeys.size()));
+private static final List<String> APIKEYS = Arrays.asList("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX",
+"FQrwZMCxVnzonv90rgNUJcAk4FpnoS0mYuSuGYqIpM2cFgp9L4",
+"qpdkY6nMknksfvYAhf2xIHp0iNRLkMlcWShxqzXyFJRxIsZ1Zz");
+private static int genNum = new Random().nextInt(APIKEYS.size());
+private static final String API_KEY = APIKEYS.get(genNum); // Select random API key from APIKEYS
-private static String getApiKey() {
+/**
+* Gets the API key.
+* Chooses between default/included keys & user specified ones (from the config file).
+* @return Tumblr API key
+*/
+public static String getApiKey() {
if (useDefaultApiKey || Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX").equals("JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX")) {
logger.info("Using api key: " + API_KEY);
return API_KEY;
} else {
-logger.info("Using user tumblr.auth api key");
-return Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX");
+String userDefinedAPIKey = Utils.getConfigString(TUMBLR_AUTH_CONFIG_KEY, "JFNLu3CbINQjRdUvZibXW9VpSEVYYtiPJ86o8YmvgLZIoKyuNX");
+logger.info("Using user tumblr.auth api key: " + userDefinedAPIKey);
+return userDefinedAPIKey;
}
}
public TumblrRipper(URL url) throws IOException {
@@ -64,7 +71,13 @@ public class TumblrRipper extends AlbumRipper {
public boolean canRip(URL url) {
return url.getHost().endsWith(DOMAIN);
}
+/**
+* Sanitizes URL.
+* @param url URL to be sanitized.
+* @return Sanitized URL
+* @throws MalformedURLException
+*/
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
String u = url.toExternalForm();
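The key-selection change above can be hard to read inline, so here is a minimal, self-contained sketch of the same idea: pick one bundled key at random, but defer to a user-supplied tumblr.auth value when one is configured. This is not RipMe code; the getConfigString stand-in (backed by a system property) and the key strings are placeholders for Utils.getConfigString and the bundled keys.

import java.util.Arrays;
import java.util.List;
import java.util.Random;

public class TumblrApiKeySketch {
    // Placeholder keys; RipMe bundles its own.
    private static final List<String> APIKEYS = Arrays.asList("bundledKeyA", "bundledKeyB", "bundledKeyC");
    private static final String DEFAULT_KEY = APIKEYS.get(0);
    // Pick one bundled key at random, once per run.
    private static final String API_KEY = APIKEYS.get(new Random().nextInt(APIKEYS.size()));

    // Stand-in for Utils.getConfigString(key, defaultValue): here a system property.
    private static String getConfigString(String key, String defaultValue) {
        String value = System.getProperty(key);
        return value != null ? value : defaultValue;
    }

    public static String getApiKey() {
        String configured = getConfigString("tumblr.auth", DEFAULT_KEY);
        if (configured.equals(DEFAULT_KEY)) {
            return API_KEY;     // no custom key configured: use the randomly chosen bundled key
        }
        return configured;      // user supplied their own tumblr.auth key
    }

    public static void main(String[] args) {
        System.out.println("Using API key: " + getApiKey());
    }
}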

View File

@@ -0,0 +1,190 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
/**
* For ripping VSCO pictures.
*/
public class VscoRipper extends AbstractHTMLRipper{
private static final String DOMAIN = "vsco.co",
HOST = "vsco";
public VscoRipper(URL url) throws IOException{
super(url);
}
/**
* Checks to see if VscoRipper can Rip specified url.
* @param url
* @return True if can rip.
* False if cannot rip.
*/
@Override
public boolean canRip(URL url) {
if (!url.getHost().endsWith(DOMAIN)) {
return false;
}
// Ignores personalized things (e.g. login, feed) and store page
// Allows links to user profiles and links to images.
//@TODO: Add support for journals and collections.
String u = url.toExternalForm();
return !u.contains("/store/") ||
!u.contains("/feed/") ||
!u.contains("/login/") ||
!u.contains("/journal/") ||
!u.contains("/collection/")||
!u.contains("/images/") ||
u.contains("/media/");
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
//no sanitization needed.
return url;
}
/**
* <p>Gets the direct URL of full-sized image through the <meta> tag.</p>
* When expanding future functionality (e.g. support from journals), put everything into this method.
* @param page
* @return
*/
@Override
public List<String> getURLsFromPage(Document page){
List<String> toRip = new ArrayList<>();
//If user wanted to rip single image
if (url.toString().contains("/media/")){
try {
toRip.add(vscoImageToURL(url.toExternalForm()));
} catch (IOException ex) {
logger.debug("Failed to convert " + url.toString() + " to external form.");
}
} else {//want to rip a member profile
/*
String baseURL = "https://vsco.co";
//Find all the relative links, adds Base URL, then adds them to an ArrayList
List<URL> relativeLinks = new ArrayList<>();
Elements links = page.getElementsByTag("a");
for(Element link : links){
System.out.println(link.toString());
//if link includes "/media/", add it to the list
if (link.attr("href").contains("/media")) {
try {
String relativeURL = vscoImageToURL(link.attr("href"));
toRip.add(baseURL + relativeURL);
} catch (IOException ex) {
logger.debug("Could not add \"" + link.toString() + "\" to list for ripping.");
}
}
}
*/
logger.debug("Sorry, RipMe currently only supports ripping single images.");
}
return toRip;
}
private String vscoImageToURL(String url) throws IOException{
Document page = Jsoup.connect(url).userAgent(USER_AGENT)
.get();
//create Elements filled only with Elements with the "meta" tag.
Elements metaTags = page.getElementsByTag("meta");
String result = "";
for(Element metaTag : metaTags){
//find URL inside meta-tag with property of "og:image"
if (metaTag.attr("property").equals("og:image")){
String givenURL = metaTag.attr("content");
givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number)
result = givenURL;
logger.debug("Found image URL: " + givenURL);
break;//immediately stop after getting URL (there should only be 1 image to be downloaded)
}
}
//Means website changed, things need to be fixed.
if (result.isEmpty()){
logger.error("Could not find image URL at: " + url);
}
return result;
}
@Override
public String getHost() {
return HOST;
}
@Override
public String getGID(URL url) throws MalformedURLException {
//Single Image
Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9]+)/media/([a-zA-Z0-9]+)");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()){
// Return the text contained between () in the regex
String user = m.group(1);
String imageNum = m.group(2).substring(0, 5);//first 5 characters should be enough to make each rip unique
return user + "/" + imageNum;
}
//Member profile (usernames should all be different, so this should work)
p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9]+)/images/[0-9]+");
m = p.matcher(url.toExternalForm());
if (m.matches()){
String user = m.group(1);
return user;
}
throw new MalformedURLException("Expected a URL to a single image or to a member profile, got " + url + " instead");
}
@Override
public String getDomain() {
return DOMAIN;
}
@Override
public Document getFirstPage() throws IOException {
return Http.url(url).get();
}
@Override
public Document getNextPage(Document doc) throws IOException {
return super.getNextPage(doc);
}
@Override
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}
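As a quick illustration of the technique vscoImageToURL relies on (not RipMe code): jsoup can select the og:image meta tag directly with an attribute selector, and stripping the trailing ?h=... height hint yields the full-size image URL. The page URL below is hypothetical; it assumes jsoup is on the classpath.

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

public class OgImageSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical media page; any page exposing an og:image meta tag works the same way.
        Document page = Jsoup.connect("https://vsco.co/someuser/media/0123456789abcdef0123456789abcdef")
                .userAgent("Mozilla/5.0")
                .get();
        // Attribute selector: equivalent to looping over every <meta> tag as the ripper does.
        String imageUrl = page.select("meta[property=og:image]").attr("content")
                .replaceAll("\\?h=[0-9]+", "");   // drop the "?h=xxx" height parameter
        System.out.println(imageUrl);
    }
}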

View File

@@ -71,7 +71,9 @@ public class WebtoonsRipper extends AbstractHTMLRipper {
public List<String> getURLsFromPage(Document doc) {
List<String> result = new ArrayList<String>();
for (Element elem : doc.select("div.viewer_img > img")) {
-result.add(elem.attr("data-url"));
+String origUrl = elem.attr("data-url");
+String[] finalUrl = origUrl.split("\\?type");
+result.add(finalUrl[0]);
}
return result;
}
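The split on "\\?type" simply drops the query-string suffix (e.g. ?type=q90) from each data-url; per the changelog entry, requesting the bare URL is what yields the maximum-quality image. A tiny demonstration with a made-up URL:

public class WebtoonsUrlSketch {
    public static void main(String[] args) {
        String origUrl = "https://webtoon.example-cdn.net/20180206/page_001.jpg?type=q90"; // made-up URL
        String finalUrl = origUrl.split("\\?type")[0];   // keep everything before "?type"
        System.out.println(finalUrl);                    // https://webtoon.example-cdn.net/20180206/page_001.jpg
    }
}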

View File

@@ -68,11 +68,11 @@ public class PornhubRipper extends VideoRipper {
title = title.replaceAll("\\+", " ");
vidUrl = null;
-for (String quality : new String[] {"quality_1080p", "quality_720p", "quality_480p", "quality_240p"}) {
-Pattern pv = Pattern.compile("^.*var player_" + quality + " = '([^']*)'.*$", Pattern.DOTALL);
+for (String quality : new String[] {"1080", "720", "480", "240"}) {
+Pattern pv = Pattern.compile("\"quality\":\"" + quality + "\",\"videoUrl\":\"(.*?)\"");
Matcher mv = pv.matcher(html);
-if (mv.matches()) {
-vidUrl = mv.group(1);
+if (mv.find()) {
+vidUrl = mv.group(1).replace("\\/", "/");
break;
}
}
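For context on the Pornhub fix: the ripper no longer scrapes the var player_quality_XXXp variables; instead it scans the JSON embedded in the page for the first available quality's videoUrl and un-escapes the \/ sequences. A standalone sketch of that matching step, run against a made-up fragment (not RipMe code):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PornhubRegexSketch {
    public static void main(String[] args) {
        // Made-up fragment shaped like the JSON the new regex targets.
        String html = "{\"quality\":\"720\",\"videoUrl\":\"https:\\/\\/cdn.example.com\\/video720.mp4\"}";
        String vidUrl = null;
        for (String quality : new String[] {"1080", "720", "480", "240"}) {
            Matcher mv = Pattern.compile("\"quality\":\"" + quality + "\",\"videoUrl\":\"(.*?)\"").matcher(html);
            if (mv.find()) {                                // find(), not matches(): the JSON is only a substring of the page
                vidUrl = mv.group(1).replace("\\/", "/");   // undo JSON escaping of forward slashes
                break;
            }
        }
        System.out.println(vidUrl);                         // https://cdn.example.com/video720.mp4
    }
}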

View File

@@ -0,0 +1,80 @@
package com.rarchives.ripme.ripper.rippers.video;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.utils.Http;
public class TwitchVideoRipper extends VideoRipper {
private static final String HOST = "twitch";
public TwitchVideoRipper(URL url) throws IOException {
super(url);
}
@Override
public String getHost() {
return HOST;
}
@Override
public boolean canRip(URL url) {
Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/.*$");
Matcher m = p.matcher(url.toExternalForm());
return m.matches();
}
@Override
public URL sanitizeURL(URL url) throws MalformedURLException {
return url;
}
@Override
public String getGID(URL url) throws MalformedURLException {
Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/(.*)$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
return m.group(m.groupCount());
}
throw new MalformedURLException(
"Expected Twitch.tv format:"
+ "https://clips.twitch.tv/####"
+ " Got: " + url);
}
@Override
public void rip() throws IOException {
logger.info("Retrieving " + this.url);
Document doc = Http.url(url).get();
//Get user friendly filename from page title
String title = doc.title();
Elements script = doc.select("script");
if (script.size() == 0) {
throw new IOException("Could not find script code at " + url);
}
//Regex assumes highest quality source is listed first
Pattern p = Pattern.compile("\"source\":\"(.*?)\"");
for (Element element : script) {
Matcher m = p.matcher(element.data());
if (m.find()){
String vidUrl = m.group(1);
addURLToDownload(new URL(vidUrl), HOST + "_" + title);
}
}
waitForThreads();
}
}
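The new Twitch clip ripper works by regex-scanning the clip page's inline script blocks for "source" entries; as the comment above notes, it assumes the highest-quality source is listed first. A minimal sketch of just that extraction step, run against a made-up script body (the variable name and JSON shape are assumptions, not Twitch's actual markup):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TwitchSourceSketch {
    public static void main(String[] args) {
        // Hypothetical inline-script content shaped like what the ripper expects.
        String scriptData = "clipInfo = {\"quality_options\":[{\"source\":\"https://clips-media.example.com/clip-1080.mp4\"}]};";
        Matcher m = Pattern.compile("\"source\":\"(.*?)\"").matcher(scriptData);
        if (m.find()) {
            System.out.println(m.group(1));   // https://clips-media.example.com/clip-1080.mp4
        }
    }
}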

View File

@@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Utils;
public class UpdateUtils {
private static final Logger logger = Logger.getLogger(UpdateUtils.class);
private static final String DEFAULT_VERSION = "1.7.13";
private static final String DEFAULT_VERSION = "1.7.14";
private static final String REPO_NAME = "ripmeapp/ripme";
private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
private static final String mainFileName = "ripme.jar";
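For orientation (this is a hedged sketch, not the UpdateUtils code): DEFAULT_VERSION is the local version compared against the latestVersion field served from updateJsonURL, which is why this commit bumps both ripme.json and this constant together. The plain string equality below stands in for the real version comparison.

import org.json.JSONObject;
import org.jsoup.Jsoup;

public class UpdateCheckSketch {
    private static final String CURRENT_VERSION = "1.7.14";   // mirrors DEFAULT_VERSION
    private static final String UPDATE_JSON_URL =
            "https://raw.githubusercontent.com/ripmeapp/ripme/master/ripme.json";

    public static void main(String[] args) throws Exception {
        // Fetch ripme.json (ignoreContentType because it is not served as HTML).
        String body = Jsoup.connect(UPDATE_JSON_URL).ignoreContentType(true).execute().body();
        String latest = new JSONObject(body).getString("latestVersion");
        if (!latest.equals(CURRENT_VERSION)) {
            System.out.println("Update available: " + latest);
        } else {
            System.out.println("Up to date (" + CURRENT_VERSION + ")");
        }
    }
}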

View File

@@ -7,6 +7,7 @@ import java.util.List;
import com.rarchives.ripme.ripper.VideoRipper;
import com.rarchives.ripme.ripper.rippers.video.PornhubRipper;
+import com.rarchives.ripme.ripper.rippers.video.TwitchVideoRipper;
import com.rarchives.ripme.ripper.rippers.video.VineRipper;
import com.rarchives.ripme.ripper.rippers.video.XhamsterRipper;
import com.rarchives.ripme.ripper.rippers.video.XvideosRipper;
@@ -37,6 +38,15 @@ public class VideoRippersTest extends RippersTest {
}
}
+public void testTwitchVideoRipper() throws IOException {
+List<URL> contentURLs = new ArrayList<>();
+contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull"));
+for (URL url : contentURLs) {
+TwitchVideoRipper ripper = new TwitchVideoRipper(url);
+videoTestHelper(ripper);
+}
+}
public void testXhamsterRipper() throws IOException {
List<URL> contentURLs = new ArrayList<>();
contentURLs.add(new URL("https://xhamster.com/videos/brazzers-busty-big-booty-milf-lisa-ann-fucks-her-masseur-1492828"));
@@ -56,8 +66,6 @@ public class VideoRippersTest extends RippersTest {
}
}
// https://github.com/RipMeApp/ripme/issues/187
-/*
public void testPornhubRipper() throws IOException {
List<URL> contentURLs = new ArrayList<>();
contentURLs.add(new URL("http://www.pornhub.com/view_video.php?viewkey=993166542"));
@@ -66,7 +74,6 @@ public class VideoRippersTest extends RippersTest {
videoTestHelper(ripper);
}
}
-*/
// https://github.com/RipMeApp/ripme/issues/186
/*

View File

@@ -0,0 +1,30 @@
package com.rarchives.ripme.tst.ripper.rippers;
import com.rarchives.ripme.ripper.rippers.VscoRipper;
import java.io.IOException;
import java.net.URL;
public class VscoRipperTest extends RippersTest {
/**
* Testing Rip.
* @throws IOException
*/
public void testSingleImageRip() throws IOException{
VscoRipper ripper = new VscoRipper(new URL("https://vsco.co/minijello/media/571cd612542220261a123441"));
testRipper(ripper);
}
/**
* Make sure it names the folder something sensible.
* @throws IOException
*/
public void testGetGID() throws IOException{
URL url = new URL("https://vsco.co/minijello/media/571cd612542220261a123441");
VscoRipper ripper = new VscoRipper(url);
assertEquals("Failed to get GID", "minijello/571cd", ripper.getGID(url));
}
}

View File

@@ -10,4 +10,9 @@ public class WebtoonsRipperTest extends RippersTest {
WebtoonsRipper ripper = new WebtoonsRipper(new URL("http://www.webtoons.com/en/drama/my-boo/ep-33/viewer?title_no=1185&episode_no=33"));
testRipper(ripper);
}
+public void testWebtoonsType() throws IOException {
+WebtoonsRipper ripper = new WebtoonsRipper(new URL("http://www.webtoons.com/en/drama/lookism/ep-145/viewer?title_no=1049&episode_no=145"));
+testRipper(ripper);
+}
}