commit 7978f7cc46
README.md
@@ -36,6 +36,9 @@ For information about running the `.jar` file, see [the How To Run wiki](https:/
 
 * Quickly downloads all images in an online album (see supported sites below)
 * Easily re-rip albums to fetch new content
+* Built in updater
+* Can rip images from tumblr in the size they were uploaded in [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#tumblrget_raw_image)
+* Skips already downloaded images by default
 
 ## [List of Supported Sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites)
 
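Editor's note: the tumblr bullet above is gated by a boolean config option. Going by the wiki anchor in the link, the key appears to be `tumblr.get_raw_image`; here is a minimal sketch of how a ripper would consult it with the `Utils.getConfigBoolean` helper used elsewhere in this commit. The key name is an assumption taken from the wiki URL, not from this diff.

```java
import com.rarchives.ripme.utils.Utils;

public class TumblrRawImageSketch {
    // Pick the raw upload when the (assumed) config key is enabled,
    // otherwise fall back to the resized rendition tumblr serves by default.
    static String pickImageUrl(String rawUrl, String resizedUrl) {
        if (Utils.getConfigBoolean("tumblr.get_raw_image", false)) { // key inferred from the wiki anchor
            return rawUrl;
        }
        return resizedUrl;
    }
}
```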
pom.xml
@@ -4,7 +4,7 @@
     <groupId>com.rarchives.ripme</groupId>
     <artifactId>ripme</artifactId>
     <packaging>jar</packaging>
-    <version>1.7.24</version>
+    <version>1.7.27</version>
     <name>ripme</name>
     <url>http://rip.rarchives.com</url>
     <properties>
ripme.json
@@ -1,6 +1,9 @@
 {
-  "latestVersion": "1.7.24",
+  "latestVersion": "1.7.27",
   "changeList": [
+    "1.7.27: IG ripper can now rip from tags; fixed json parsing issues",
+    "1.7.26: fixed instagram ripper",
+    "1.7.25: Fixed instagram ripper; Added an option to use short names for 8muses; Added tsuminoRipper; Added support for incase.buttsmithy.com",
     "1.7.24: Added sta.sh ripper; Added sinfest.com ripper; Added femjoyhunter.com ripper; Disabled flaky unit tests",
     "1.7.23: Fixed xvideos ripper; InstagramRipper now works with lastseenfeature",
     "1.7.22: Added func to normalize urls before reading from/writing to url history file; last seen feature now works with instagram",
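Editor's note: this file is what the updater polls (see the UpdateUtils hunk at the end of this commit, which fetches it via `updateJsonURL`). A self-contained sketch of reading the two fields with org.json, assuming the JSON text has already been downloaded:

```java
import org.json.JSONArray;
import org.json.JSONObject;

public class RipmeJsonSketch {
    public static void main(String[] args) {
        // Trimmed copy of the file above; the real updater downloads it
        // from raw.githubusercontent.com instead of hard-coding it.
        String text = "{\"latestVersion\": \"1.7.27\", \"changeList\": ["
                + "\"1.7.27: IG ripper can now rip from tags; fixed json parsing issues\"]}";
        JSONObject json = new JSONObject(text);
        System.out.println("latestVersion = " + json.getString("latestVersion"));
        JSONArray changeList = json.getJSONArray("changeList");
        for (int i = 0; i < changeList.length(); i++) {
            System.out.println(changeList.getString(i));
        }
    }
}
```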
AbstractRipper.java
@@ -174,7 +174,7 @@ public abstract class AbstractRipper
      * URL of the file
      * @param saveAs
      * Path of the local file to save the content to.
-     * @return True on success, flase on failure.
+     * @return True on success, false on failure.
      */
     public abstract boolean addURLToDownload(URL url, File saveAs);
 
@@ -206,11 +206,13 @@ public abstract class AbstractRipper
      * The HTTP referrer to use while downloading this file.
      * @param cookies
      * The cookies to send to the server while downloading this file.
+     * @param fileName
+     * The name that file will be written to
      * @return
      * True if downloaded successfully
      * False if failed to download
      */
-    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies) {
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies, String fileName) {
         // Don't re-add the url if it was downloaded in a previous rip
         if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
             if (hasDownloadedURL(url.toExternalForm())) {
@@ -225,9 +227,18 @@ public abstract class AbstractRipper
             logger.debug("Ripper has been stopped");
             return false;
         }
-        logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies);
-        String saveAs = url.toExternalForm();
-        saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+        logger.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName);
+        String saveAs;
+        if (fileName != null) {
+            saveAs = fileName;
+            // Get the extension of the file
+            String extension = url.toExternalForm().substring(url.toExternalForm().lastIndexOf(".") + 1);
+            saveAs = saveAs + "." + extension;
+        } else {
+            saveAs = url.toExternalForm();
+            saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+        }
+
         if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
         if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
         if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
@@ -274,7 +285,11 @@ public abstract class AbstractRipper
      * @return True on success, flase on failure.
      */
     protected boolean addURLToDownload(URL url, String prefix, String subdirectory) {
-        return addURLToDownload(url, prefix, subdirectory, null, null);
+        return addURLToDownload(url, prefix, subdirectory, null, null, null);
+    }
+
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map<String, String> cookies) {
+        return addURLToDownload(url, prefix, subdirectory, referrer, cookies, null);
     }
 
     /**
@@ -290,6 +305,8 @@ public abstract class AbstractRipper
         // Use empty subdirectory
         return addURLToDownload(url, prefix, "");
     }
+
+
     /**
      * Waits for downloading threads to complete.
      */
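Editor's note: the new `fileName` parameter changes how `saveAs` is derived. With an explicit name, only the URL's extension is reused (and any query string that tags along is dropped by the existing `?`/`#`/`&` checks); otherwise the name is whatever follows the last `/`. A standalone sketch of that derivation, with the logic lifted from the hunk above:

```java
import java.net.MalformedURLException;
import java.net.URL;

public class SaveAsSketch {
    // Mirrors the branch added to addURLToDownload above.
    static String deriveSaveAs(URL url, String fileName) {
        String saveAs;
        if (fileName != null) {
            // Reuse the URL's extension; note it can still carry a query
            // string at this point, e.g. "jpg?x=1", until the clean-up below.
            String extension = url.toExternalForm().substring(url.toExternalForm().lastIndexOf(".") + 1);
            saveAs = fileName + "." + extension;
        } else {
            saveAs = url.toExternalForm();
            saveAs = saveAs.substring(saveAs.lastIndexOf('/') + 1);
        }
        // The existing clean-up then strips query/fragment leftovers.
        if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
        if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
        if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
        return saveAs;
    }

    public static void main(String[] args) throws MalformedURLException {
        URL url = new URL("https://example.com/albums/photo.jpg?x=1");
        System.out.println(deriveSaveAs(url, null));        // photo.jpg
        System.out.println(deriveSaveAs(url, "005_cover")); // 005_cover.jpg
    }
}
```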
EightmusesRipper.java
@@ -11,6 +11,7 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.rarchives.ripme.utils.Utils;
 import org.jsoup.Connection.Response;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
@@ -125,7 +126,11 @@ public class EightmusesRipper extends AbstractHTMLRipper {
                         logger.info("Retrieving full-size image location from " + imageHref);
                         image = getFullSizeImage(imageHref);
                         URL imageUrl = new URL(image);
-                        addURLToDownload(imageUrl, getPrefix(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies);
+                        if (Utils.getConfigBoolean("8muses.use_short_names", false)) {
+                            addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "");
+                        } else {
+                            addURLToDownload(imageUrl, getPrefixLong(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies);
+                        }
                         // X is our page index
                         x++;
 
@@ -178,8 +183,11 @@ public class EightmusesRipper extends AbstractHTMLRipper {
         addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), cookies);
     }
 
-    @Override
-    public String getPrefix(int index) {
+    public String getPrefixLong(int index) {
         return String.format("%03d_", index);
     }
+
+    public String getPrefixShort(int index) {
+        return String.format("%03d", index);
+    }
 }
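Editor's note: the two prefixes differ only in the trailing underscore; the short form backs the new `8muses.use_short_names` option mentioned in the 1.7.25 changelog entry. For illustration:

```java
public class PrefixSketch {
    public static void main(String[] args) {
        System.out.println(String.format("%03d_", 7)); // "007_" - getPrefixLong
        System.out.println(String.format("%03d", 7));  // "007"  - getPrefixShort
    }
}
```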
InstagramRipper.java
@@ -1,8 +1,11 @@
 package com.rarchives.ripme.ripper.rippers;
 
+import java.io.BufferedReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.net.URLConnection;
 import java.time.*;
 import java.time.format.DateTimeFormatter;
 import java.util.ArrayList;
@@ -25,6 +28,9 @@ import com.rarchives.ripme.utils.Utils;
 
 public class InstagramRipper extends AbstractHTMLRipper {
     String nextPageID = "";
+    private String qHash;
+    private boolean rippingTag = false;
+    private String tagName;
 
     private String userID;
 
@@ -130,13 +136,31 @@ public class InstagramRipper extends AbstractHTMLRipper {
         p = Pattern.compile("^https?://www.instagram.com/explore/tags/([^/]+)/?");
         m = p.matcher(url.toExternalForm());
         if (m.matches()) {
+            rippingTag = true;
+            tagName = m.group(1);
             return m.group(1);
         }
 
         throw new MalformedURLException("Unable to find user in " + url);
     }
 
+    private String stripHTMLTags(String t) {
+        t = t.replaceAll("<html>\n" +
+                " <head></head>\n" +
+                " <body>", "");
+        t.replaceAll("</body>\n" +
+                "</html>", "");
+        t = t.replaceAll("\n", "");
+        t = t.replaceAll("=\"\"", "");
+        return t;
+    }
+
+
     private JSONObject getJSONFromPage(Document firstPage) throws IOException {
+        // Check if this page is HTML + JSON or jsut json
+        if (!firstPage.html().contains("window._sharedData =")) {
+            return new JSONObject(stripHTMLTags(firstPage.html()));
+        }
         String jsonText = "";
         try {
             for (Element script : firstPage.select("script[type=text/javascript]")) {
@@ -153,8 +177,10 @@ public class InstagramRipper extends AbstractHTMLRipper {
 
     @Override
     public Document getFirstPage() throws IOException {
-        userID = getGID(url);
-        return Http.url(url).get();
+        Document p = Http.url(url).get();
+        // Get the query hash so we can download the next page
+        qHash = getQHash(p);
+        return p;
     }
 
     private String getVideoFromPage(String videoID) {
@@ -210,30 +236,47 @@ public class InstagramRipper extends AbstractHTMLRipper {
 
         if (!url.toExternalForm().contains("/p/")) {
             JSONArray datas = new JSONArray();
-            try {
-                JSONArray profilePage = json.getJSONObject("entry_data").getJSONArray("ProfilePage");
-                datas = profilePage.getJSONObject(0).getJSONObject("user").getJSONObject("media").getJSONArray("nodes");
-            } catch (JSONException e) {
-                // Handle hashtag pages
-                datas = json.getJSONObject("entry_data").getJSONArray("TagPage").getJSONObject(0)
-                        .getJSONObject("tag").getJSONObject("media").getJSONArray("nodes");
+            if (!rippingTag) {
+                // This first try only works on data from the first page
+                try {
+                    JSONArray profilePage = json.getJSONObject("entry_data").getJSONArray("ProfilePage");
+                    userID = profilePage.getJSONObject(0).getString("logging_page_id").replaceAll("profilePage_", "");
+                    datas = profilePage.getJSONObject(0).getJSONObject("graphql").getJSONObject("user")
+                            .getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges");
+                } catch (JSONException e) {
+                    datas = json.getJSONObject("data").getJSONObject("user")
+                            .getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges");
+                }
+            } else {
+                try {
+                    JSONArray tagPage = json.getJSONObject("entry_data").getJSONArray("TagPage");
+                    datas = tagPage.getJSONObject(0).getJSONObject("graphql").getJSONObject("hashtag")
+                            .getJSONObject("edge_hashtag_to_media").getJSONArray("edges");
+                } catch (JSONException e) {
+                    datas = json.getJSONObject("data").getJSONObject("hashtag").getJSONObject("edge_hashtag_to_media")
+                            .getJSONArray("edges");
+                }
             }
             for (int i = 0; i < datas.length(); i++) {
                 JSONObject data = (JSONObject) datas.get(i);
-                Long epoch = data.getLong("date");
+                data = data.getJSONObject("node");
+                Long epoch = data.getLong("taken_at_timestamp");
                 Instant instant = Instant.ofEpochSecond(epoch);
                 String image_date = DateTimeFormatter.ofPattern("yyyy_MM_dd_hh:mm_").format(ZonedDateTime.ofInstant(instant, ZoneOffset.UTC));
-                if (data.getString("__typename").equals("GraphSidecar")) {
-                    try {
-                        Document slideShowDoc = Http.url(new URL ("https://www.instagram.com/p/" + data.getString("code"))).get();
-                        List<String> toAdd = getPostsFromSinglePage(slideShowDoc);
-                        for (int slideShowInt=0; slideShowInt<toAdd.size(); slideShowInt++) {
-                            addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("code"));
-                        }
-                    } catch (MalformedURLException e) {
-                        logger.error("Unable to download slide show, URL was malformed");
-                    } catch (IOException e) {
-                        logger.error("Unable to download slide show");
-                    }
-                }
+                // It looks like tag pages don't have the __typename key
+                if (!rippingTag) {
+                    if (data.getString("__typename").equals("GraphSidecar")) {
+                        try {
+                            Document slideShowDoc = Http.url(new URL("https://www.instagram.com/p/" + data.getString("shortcode"))).get();
+                            List<String> toAdd = getPostsFromSinglePage(slideShowDoc);
+                            for (int slideShowInt = 0; slideShowInt < toAdd.size(); slideShowInt++) {
+                                addURLToDownload(new URL(toAdd.get(slideShowInt)), image_date + data.getString("shortcode"));
+                            }
+                        } catch (MalformedURLException e) {
+                            logger.error("Unable to download slide show, URL was malformed");
+                        } catch (IOException e) {
+                            logger.error("Unable to download slide show");
+                        }
+                    }
+                }
                 try {
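Editor's note: after this rewrite, both the profile and tag branches land on a GraphQL-style `edges` array whose entries wrap the post under a `node` key, which is why the loop now unwraps `node` and reads `taken_at_timestamp`. A self-contained sketch of that traversal over a hand-built fragment (field names come from the hunk above; the sample shortcode and timestamp are invented):

```java
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

import org.json.JSONArray;
import org.json.JSONObject;

public class EdgeTraversalSketch {
    public static void main(String[] args) {
        // Minimal stand-in for the response shape the ripper walks above.
        String text = "{\"data\":{\"user\":{\"edge_owner_to_timeline_media\":{\"edges\":["
                + "{\"node\":{\"shortcode\":\"BdApPqhHxcj\",\"taken_at_timestamp\":1514764800}}]}}}}";
        JSONArray edges = new JSONObject(text).getJSONObject("data").getJSONObject("user")
                .getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges");
        for (int i = 0; i < edges.length(); i++) {
            JSONObject node = edges.getJSONObject(i).getJSONObject("node");
            long epoch = node.getLong("taken_at_timestamp");
            String imageDate = DateTimeFormatter.ofPattern("yyyy_MM_dd_hh:mm_")
                    .format(ZonedDateTime.ofInstant(Instant.ofEpochSecond(epoch), ZoneOffset.UTC));
            System.out.println(imageDate + node.getString("shortcode")); // 2018_01_01_12:00_BdApPqhHxcj
        }
    }
}
```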
@ -246,9 +289,9 @@ public class InstagramRipper extends AbstractHTMLRipper {
|
|||||||
addURLToDownload(new URL(getOriginalUrl(data.getString("thumbnail_src"))), image_date);
|
addURLToDownload(new URL(getOriginalUrl(data.getString("thumbnail_src"))), image_date);
|
||||||
} else {
|
} else {
|
||||||
if (!Utils.getConfigBoolean("instagram.download_images_only", false)) {
|
if (!Utils.getConfigBoolean("instagram.download_images_only", false)) {
|
||||||
addURLToDownload(new URL(getVideoFromPage(data.getString("code"))), image_date);
|
addURLToDownload(new URL(getVideoFromPage(data.getString("shortcode"))), image_date);
|
||||||
} else {
|
} else {
|
||||||
sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping video " + data.getString("code"));
|
sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping video " + data.getString("shortcode"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (MalformedURLException e) {
|
} catch (MalformedURLException e) {
|
||||||
@ -274,19 +317,13 @@ public class InstagramRipper extends AbstractHTMLRipper {
|
|||||||
public Document getNextPage(Document doc) throws IOException {
|
public Document getNextPage(Document doc) throws IOException {
|
||||||
Document toreturn;
|
Document toreturn;
|
||||||
if (!nextPageID.equals("") && !isThisATest()) {
|
if (!nextPageID.equals("") && !isThisATest()) {
|
||||||
if (url.toExternalForm().contains("/tags/")) {
|
if (rippingTag) {
|
||||||
try {
|
try {
|
||||||
// Sleep for a while to avoid a ban
|
|
||||||
sleep(2500);
|
sleep(2500);
|
||||||
if (url.toExternalForm().substring(url.toExternalForm().length() - 1).equals("/")) {
|
toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash +
|
||||||
toreturn = Http.url(url.toExternalForm() + "?max_id=" + nextPageID).get();
|
"&variables={\"tag_name\":\"" + tagName + "\",\"first\":4,\"after\":\"" + nextPageID + "\"}").ignoreContentType().get();
|
||||||
} else {
|
// Sleep for a while to avoid a ban
|
||||||
toreturn = Http.url(url.toExternalForm() + "/?max_id=" + nextPageID).get();
|
|
||||||
}
|
|
||||||
logger.info(toreturn.html());
|
logger.info(toreturn.html());
|
||||||
if (!hasImage(toreturn)) {
|
|
||||||
throw new IOException("No more pages");
|
|
||||||
}
|
|
||||||
return toreturn;
|
return toreturn;
|
||||||
|
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
@ -297,8 +334,9 @@ public class InstagramRipper extends AbstractHTMLRipper {
|
|||||||
try {
|
try {
|
||||||
// Sleep for a while to avoid a ban
|
// Sleep for a while to avoid a ban
|
||||||
sleep(2500);
|
sleep(2500);
|
||||||
toreturn = Http.url("https://www.instagram.com/" + userID + "/?max_id=" + nextPageID).get();
|
toreturn = Http.url("https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" +
|
||||||
if (!hasImage(toreturn)) {
|
"{\"id\":\"" + userID + "\",\"first\":100,\"after\":\"" + nextPageID + "\"}").ignoreContentType().get();
|
||||||
|
if (!pageHasImages(toreturn)) {
|
||||||
throw new IOException("No more pages");
|
throw new IOException("No more pages");
|
||||||
}
|
}
|
||||||
return toreturn;
|
return toreturn;
|
||||||
@ -315,19 +353,55 @@ public class InstagramRipper extends AbstractHTMLRipper {
|
|||||||
addURLToDownload(url);
|
addURLToDownload(url);
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean hasImage(Document doc) {
|
private boolean pageHasImages(Document doc) {
|
||||||
try {
|
logger.info("BAD DATA: " + stripHTMLTags(doc.html()));
|
||||||
JSONObject json = getJSONFromPage(doc);
|
JSONObject json = new JSONObject(stripHTMLTags(doc.html()));
|
||||||
JSONArray profilePage = json.getJSONObject("entry_data").getJSONArray("ProfilePage");
|
int numberOfImages = json.getJSONObject("data").getJSONObject("user")
|
||||||
JSONArray datas = profilePage.getJSONObject(0).getJSONObject("user").getJSONObject("media").getJSONArray("nodes");
|
.getJSONObject("edge_owner_to_timeline_media").getJSONArray("edges").length();
|
||||||
logger.info(datas.length());
|
if (numberOfImages == 0) {
|
||||||
if (datas.length() == 0) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
} catch (IOException e) {
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
private String getQHash(Document doc) {
|
||||||
|
String jsFileURL = "https://www.instagram.com" + doc.select("link[rel=preload]").attr("href");
|
||||||
|
StringBuilder sb = new StringBuilder();
|
||||||
|
Document jsPage;
|
||||||
|
try {
|
||||||
|
// We can't use Jsoup here because it won't download a non-html file larger than a MB
|
||||||
|
// even if you set maxBodySize to 0
|
||||||
|
URLConnection connection = new URL(jsFileURL).openConnection();
|
||||||
|
BufferedReader in = new BufferedReader(new InputStreamReader(connection.getInputStream()));
|
||||||
|
String line;
|
||||||
|
while ((line = in.readLine()) != null) {
|
||||||
|
sb.append(line);
|
||||||
|
}
|
||||||
|
in.close();
|
||||||
|
|
||||||
|
} catch (MalformedURLException e) {
|
||||||
|
logger.info("Unable to get query_hash, " + jsFileURL + " is a malformed URL");
|
||||||
|
return null;
|
||||||
|
} catch (IOException e) {
|
||||||
|
logger.info("Unable to get query_hash");
|
||||||
|
logger.info(e.getMessage());
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
if (!rippingTag) {
|
||||||
|
Pattern jsP = Pattern.compile("o},queryId:.([a-zA-Z0-9]+).");
|
||||||
|
Matcher m = jsP.matcher(sb.toString());
|
||||||
|
if (m.find()) {
|
||||||
|
return m.group(1);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Pattern jsP = Pattern.compile("return e.tagMedia.byTagName.get\\(t\\).pagination},queryId:.([a-zA-Z0-9]+).");
|
||||||
|
Matcher m = jsP.matcher(sb.toString());
|
||||||
|
if (m.find()) {
|
||||||
|
return m.group(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
logger.info("Could not find query_hash on " + jsFileURL);
|
||||||
|
return null;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
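Editor's note: pagination now goes through Instagram's GraphQL endpoint: `getQHash` scrapes the query hash out of the preloaded JS bundle, and `getNextPage` plugs it into a query URL together with a JSON `variables` blob. A sketch of how those URLs are assembled, using the same string layout as `getNextPage` above (the hash and cursor values below are made up):

```java
public class GraphqlUrlSketch {
    static String userPageUrl(String qHash, String userID, String after) {
        return "https://www.instagram.com/graphql/query/?query_hash=" + qHash + "&variables=" +
                "{\"id\":\"" + userID + "\",\"first\":100,\"after\":\"" + after + "\"}";
    }

    static String tagPageUrl(String qHash, String tagName, String after) {
        return "https://www.instagram.com/graphql/query/?query_hash=" + qHash +
                "&variables={\"tag_name\":\"" + tagName + "\",\"first\":4,\"after\":\"" + after + "\"}";
    }

    public static void main(String[] args) {
        // Hypothetical values, for illustration only.
        System.out.println(userPageUrl("abc123", "4280661977", "AQBh"));
        System.out.println(tagPageUrl("abc123", "sunset", "AQBh"));
    }
}
```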
TsuminoRipper.java
@@ -0,0 +1,108 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.rarchives.ripme.ui.RipStatusMessage;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.Connection;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class TsuminoRipper extends AbstractHTMLRipper {
+    private Map<String,String> cookies = new HashMap<>();
+
+    public TsuminoRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    private JSONArray getPageUrls() {
+        String postURL = "http://www.tsumino.com/Read/Load";
+        try {
+            // This sessionId will expire and need to be replaced
+            cookies.put("ASP.NET_SessionId","c4rbzccf0dvy3e0cloolmlkq");
+            logger.info(cookies);
+            Document doc = Jsoup.connect(postURL).data("q", getAlbumID()).userAgent(USER_AGENT).cookies(cookies).referrer("http://www.tsumino.com/Read/View/" + getAlbumID()).post();
+            String jsonInfo = doc.html().replaceAll("<html>","").replaceAll("<head></head>", "").replaceAll("<body>", "").replaceAll("</body>", "")
+                    .replaceAll("</html>", "").replaceAll("\n", "");
+            logger.info(jsonInfo);
+            JSONObject json = new JSONObject(jsonInfo);
+            logger.info(json.getJSONArray("reader_page_urls"));
+            return json.getJSONArray("reader_page_urls");
+        } catch (IOException e) {
+            logger.info(e);
+            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED, "Unable to download album, please compete the captcha at http://www.tsumino.com/Read/Auth/"
+                    + getAlbumID() + " and try again");
+            return null;
+        }
+    }
+
+    @Override
+    public String getHost() {
+        return "tsumino";
+    }
+
+    @Override
+    public String getDomain() {
+        return "tsumino.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/([a-zA-Z0-9_-]*)");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1) + "_" + m.group(2);
+        }
+        throw new MalformedURLException("Expected tsumino URL format: " +
+                "tsumino.com/Book/Info/ID/TITLE - got " + url + " instead");
+    }
+
+    private String getAlbumID() {
+        Pattern p = Pattern.compile("https?://www.tsumino.com/Book/Info/([0-9]+)/\\S*");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        return null;
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        Connection.Response resp = Http.url(url).response();
+        cookies.putAll(resp.cookies());
+        logger.info(resp.parse());
+        return resp.parse();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        JSONArray imageIds = getPageUrls();
+        List<String> result = new ArrayList<>();
+        for (int i = 0; i < imageIds.length(); i++) {
+            result.add("http://www.tsumino.com/Image/Object?name=" + URLEncoder.encode(imageIds.getString(i)));
+        }
+
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        sleep(1000);
+        addURLToDownload(url, getPrefix(index));
+    }
+}
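Editor's note: each entry of `reader_page_urls` is URL-encoded into the `name` parameter of the Image/Object endpoint. The ripper calls the one-argument `URLEncoder.encode(String)`, which is deprecated; the sketch below uses the two-argument form with an explicit charset but produces the same URLs (the page name is a hypothetical example):

```java
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class TsuminoUrlSketch {
    public static void main(String[] args) throws UnsupportedEncodingException {
        // Hypothetical page name; real values come from reader_page_urls.
        String pageName = "2018/01/01/12345/1.jpg";
        String url = "http://www.tsumino.com/Image/Object?name=" + URLEncoder.encode(pageName, "UTF-8");
        System.out.println(url); // ...?name=2018%2F01%2F01%2F12345%2F1.jpg
    }
}
```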
WordpressComicRipper.java
@@ -37,6 +37,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
     private static List<String> explicit_domains = Arrays.asList(
             "www.totempole666.com",
             "buttsmithy.com",
+            "incase.buttsmithy.com",
             "themonsterunderthebed.net",
             "prismblush.com",
             "www.konradokonski.com",
@@ -87,6 +88,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             return true;
         }
 
+        Pattern buttsmithyIncasePat = Pattern.compile("https?://incase.buttsmithy.com/comic/([a-zA-Z0-9_-]*)/?$");
+        Matcher buttsmithyIncaseMat = buttsmithyIncasePat.matcher(url.toExternalForm());
+        if (buttsmithyIncaseMat.matches()) {
+            return true;
+        }
+
         Pattern theMonsterUnderTheBedPat = Pattern.compile("https?://themonsterunderthebed.net/\\?comic=([a-zA-Z0-9_-]*)/?$");
         Matcher theMonsterUnderTheBedMat = theMonsterUnderTheBedPat.matcher(url.toExternalForm());
         if (theMonsterUnderTheBedMat.matches()) {
@@ -178,6 +185,12 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
             return getHost() + "_" + prismblushMat.group(1);
         }
 
+        Pattern buttsmithyIncasePat = Pattern.compile("https?://incase.buttsmithy.com/comic/([a-zA-Z0-9_-]*)/?$");
+        Matcher buttsmithyIncaseMat = buttsmithyIncasePat.matcher(url.toExternalForm());
+        if (buttsmithyIncaseMat.matches()) {
+            return getHost() + "_" + buttsmithyIncaseMat.group(1).replaceAll("-page-\\d", "").replaceAll("-pg-\\d", "");
+        }
+
         Pattern comicsxxxPat = Pattern.compile("https?://comics-xxx.com/([a-zA-Z0-9_\\-]*)/?$");
         Matcher comicsxxxMat = comicsxxxPat.matcher(url.toExternalForm());
         if (comicsxxxMat.matches()) {
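Editor's note: the new GID branch strips `-page-N` and `-pg-N` suffixes so every page of an incase.buttsmithy.com comic maps to the same album. The same pattern and replacement, run standalone (the comic URL is a made-up example):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class IncaseGidSketch {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("https?://incase.buttsmithy.com/comic/([a-zA-Z0-9_-]*)/?$");
        Matcher m = p.matcher("https://incase.buttsmithy.com/comic/alfie-page-5");
        if (m.matches()) {
            String gid = m.group(1).replaceAll("-page-\\d", "").replaceAll("-pg-\\d", "");
            System.out.println(gid); // alfie
        }
    }
}
```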
UpdateUtils.java
@@ -21,7 +21,7 @@ import com.rarchives.ripme.utils.Utils;
 public class UpdateUtils {
 
     private static final Logger logger = Logger.getLogger(UpdateUtils.class);
-    private static final String DEFAULT_VERSION = "1.7.24";
+    private static final String DEFAULT_VERSION = "1.7.27";
     private static final String REPO_NAME = "ripmeapp/ripme";
     private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json";
     private static final String mainFileName = "ripme.jar";
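Editor's note: `DEFAULT_VERSION` is the version baked into the build, compared against `latestVersion` from ripme.json (fetched via `updateJsonURL`). The comparison itself is not part of this diff; below is only an illustrative way to compare dotted versions, not UpdateUtils' actual code:

```java
public class VersionCompareSketch {
    // Illustrative comparison of dotted versions like "1.7.24" vs "1.7.27";
    // UpdateUtils' real logic is outside this diff.
    static boolean isNewer(String latest, String current) {
        String[] a = latest.split("\\.");
        String[] b = current.split("\\.");
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
            int diff = Integer.parseInt(a[i]) - Integer.parseInt(b[i]);
            if (diff != 0) {
                return diff > 0;
            }
        }
        return a.length > b.length;
    }

    public static void main(String[] args) {
        System.out.println(isNewer("1.7.27", "1.7.24")); // true
    }
}
```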