package com.rarchives.ripme.ripper;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;

import org.jsoup.nodes.Document;

import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Utils;

/**
 * Simplified ripper, designed for ripping from sites by parsing HTML.
 */
public abstract class AbstractHTMLRipper extends AlbumRipper {
|
2017-05-15 19:24:36 +02:00
|
|
|
|
2017-10-24 16:33:28 +02:00
|
|
|
protected AbstractHTMLRipper(URL url) throws IOException {
|
2014-06-22 02:08:42 +02:00
|
|
|
super(url);
|
|
|
|
}
|
|
|
|
|
2017-10-24 16:33:28 +02:00
|
|
|
protected abstract String getDomain();
|
2014-06-22 02:08:42 +02:00
|
|
|
public abstract String getHost();
|
|
|
|
|
2017-10-24 16:33:28 +02:00
|
|
|
protected abstract Document getFirstPage() throws IOException;
|
2014-06-23 04:17:40 +02:00
|
|
|
public Document getNextPage(Document doc) throws IOException {
|
2015-02-06 12:01:02 +01:00
|
|
|
return null;
|
2014-06-23 04:17:40 +02:00
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
protected abstract List<String> getURLsFromPage(Document page);
|
|
|
|
protected List<String> getDescriptionsFromPage(Document doc) throws IOException {
|
2017-05-15 19:24:36 +02:00
|
|
|
throw new IOException("getDescriptionsFromPage not implemented"); // Do I do this or make an abstract function?
|
2014-11-29 05:59:39 +01:00
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
protected abstract void downloadURL(URL url, int index);
|
|
|
|
protected DownloadThreadPool getThreadPool() {
|
2014-06-23 04:12:29 +02:00
|
|
|
return null;
|
|
|
|
}
|
2014-06-22 02:08:42 +02:00
|
|
|
|
2017-10-24 16:33:28 +02:00
|
|
|
protected boolean keepSortOrder() {
|
2014-06-22 02:08:42 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean canRip(URL url) {
|
|
|
|
return url.getHost().endsWith(getDomain());
|
|
|
|
}
|
2017-05-15 19:24:36 +02:00
|
|
|
|
2014-06-22 02:08:42 +02:00
|
|
|
@Override
|
|
|
|
public URL sanitizeURL(URL url) throws MalformedURLException {
|
|
|
|
return url;
|
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
protected boolean hasDescriptionSupport() {
|
2017-05-15 19:24:36 +02:00
|
|
|
return false;
|
2014-11-29 05:59:39 +01:00
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
protected String[] getDescription(String url, Document page) throws IOException {
|
2017-05-15 19:24:36 +02:00
|
|
|
throw new IOException("getDescription not implemented"); // Do I do this or make an abstract function?
|
2014-11-29 05:59:39 +01:00
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
protected int descSleepTime() {
|
2017-04-29 21:35:39 +02:00
|
|
|
return 100;
|
2015-05-29 20:26:48 +02:00
|
|
|
}
|
2014-06-22 02:08:42 +02:00
|
|
|
@Override
|
|
|
|
public void rip() throws IOException {
|
|
|
|
int index = 0;
|
2014-11-29 05:59:39 +01:00
|
|
|
int textindex = 0;
|
2014-06-22 02:08:42 +02:00
|
|
|
logger.info("Retrieving " + this.url);
|
|
|
|
sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
|
|
|
|
Document doc = getFirstPage();
|
2017-05-15 19:24:36 +02:00
|
|
|
|
2014-06-22 02:08:42 +02:00
|
|
|
while (doc != null) {
|
|
|
|
List<String> imageURLs = getURLsFromPage(doc);
|
2015-02-10 08:29:29 +01:00
|
|
|
// Remove all but 1 image
|
|
|
|
if (isThisATest()) {
|
|
|
|
while (imageURLs.size() > 1) {
|
|
|
|
imageURLs.remove(1);
|
|
|
|
}
|
|
|
|
}
|
2014-06-22 02:08:42 +02:00
|
|
|
|
|
|
|
if (imageURLs.size() == 0) {
|
2014-07-20 10:31:45 +02:00
|
|
|
throw new IOException("No images found at " + doc.location());
|
2014-06-22 02:08:42 +02:00
|
|
|
}
|
2017-05-15 19:24:36 +02:00
|
|
|
|
2014-06-22 02:08:42 +02:00
|
|
|
for (String imageURL : imageURLs) {
|
2015-02-06 08:58:17 +01:00
|
|
|
index += 1;
|
2015-02-10 08:29:29 +01:00
|
|
|
logger.debug("Found image url #" + index + ": " + imageURL);
|
2015-02-06 08:58:17 +01:00
|
|
|
downloadURL(new URL(imageURL), index);
|
2014-06-22 02:08:42 +02:00
|
|
|
if (isStopped()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2015-12-21 16:36:56 +01:00
|
|
|
if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) {
|
2015-02-10 08:29:29 +01:00
|
|
|
logger.debug("Fetching description(s) from " + doc.location());
|
2017-05-15 19:24:36 +02:00
|
|
|
List<String> textURLs = getDescriptionsFromPage(doc);
|
|
|
|
if (textURLs.size() > 0) {
|
2015-05-29 20:26:48 +02:00
|
|
|
logger.debug("Found description link(s) from " + doc.location());
|
2017-05-15 19:24:36 +02:00
|
|
|
for (String textURL : textURLs) {
|
|
|
|
if (isStopped()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
textindex += 1;
|
|
|
|
logger.debug("Getting description from " + textURL);
|
2017-04-29 21:35:39 +02:00
|
|
|
String[] tempDesc = getDescription(textURL,doc);
|
|
|
|
if (tempDesc != null) {
|
|
|
|
if (Utils.getConfigBoolean("file.overwrite", false) || !(new File(
|
|
|
|
workingDir.getCanonicalPath()
|
|
|
|
+ ""
|
|
|
|
+ File.separator
|
|
|
|
+ getPrefix(index)
|
|
|
|
+ (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL)))
|
|
|
|
+ ".txt").exists())) {
|
|
|
|
logger.debug("Got description from " + textURL);
|
|
|
|
saveText(new URL(textURL), "", tempDesc[0], textindex, (tempDesc.length > 1 ? tempDesc[1] : fileNameFromURL(new URL(textURL))));
|
|
|
|
sleep(descSleepTime());
|
|
|
|
} else {
|
|
|
|
logger.debug("Description from " + textURL + " already exists.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-15 19:24:36 +02:00
|
|
|
}
|
|
|
|
}
|
2014-11-29 05:59:39 +01:00
|
|
|
}
|
2014-06-23 04:12:29 +02:00
|
|
|
|
2015-02-10 08:29:29 +01:00
|
|
|
if (isStopped() || isThisATest()) {
|
2014-06-23 04:12:29 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-06-22 02:08:42 +02:00
|
|
|
try {
|
2014-06-23 04:12:29 +02:00
|
|
|
sendUpdate(STATUS.LOADING_RESOURCE, "next page");
|
2014-06-22 02:08:42 +02:00
|
|
|
doc = getNextPage(doc);
|
|
|
|
} catch (IOException e) {
|
|
|
|
logger.info("Can't get next page: " + e.getMessage());
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2014-06-23 04:12:29 +02:00
|
|
|
|
|
|
|
// If they're using a thread pool, wait for it.
|
|
|
|
if (getThreadPool() != null) {
|
2015-02-10 08:29:29 +01:00
|
|
|
logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
|
2014-06-23 04:12:29 +02:00
|
|
|
getThreadPool().waitForThreads();
|
|
|
|
}
|
2014-06-22 02:08:42 +02:00
|
|
|
waitForThreads();
|
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
private String fileNameFromURL(URL url) {
|
2014-11-29 05:59:39 +01:00
|
|
|
String saveAs = url.toExternalForm();
|
2017-05-15 19:24:36 +02:00
|
|
|
if (saveAs.substring(saveAs.length() - 1) == "/") { saveAs = saveAs.substring(0,saveAs.length() - 1) ;}
|
2014-11-29 05:59:39 +01:00
|
|
|
saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
|
|
|
|
if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
|
|
|
|
if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
|
|
|
|
if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
|
|
|
|
if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
|
2017-04-29 21:35:39 +02:00
|
|
|
return saveAs;
|
|
|
|
}
|
|
|
|
public boolean saveText(URL url, String subdirectory, String text, int index) {
|
|
|
|
String saveAs = fileNameFromURL(url);
|
2017-04-27 06:13:11 +02:00
|
|
|
return saveText(url,subdirectory,text,index,saveAs);
|
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
private boolean saveText(URL url, String subdirectory, String text, int index, String fileName) {
|
2017-04-27 06:13:11 +02:00
|
|
|
// Not the best for some cases, like FurAffinity. Overridden there.
|
|
|
|
try {
|
|
|
|
stopCheck();
|
|
|
|
} catch (IOException e) {
|
|
|
|
return false;
|
|
|
|
}
|
2014-11-29 05:59:39 +01:00
|
|
|
File saveFileAs;
|
|
|
|
try {
|
|
|
|
if (!subdirectory.equals("")) { // Not sure about this part
|
|
|
|
subdirectory = File.separator + subdirectory;
|
|
|
|
}
|
2015-05-29 20:26:48 +02:00
|
|
|
// TODO Get prefix working again, probably requires reworking a lot of stuff! (Might be fixed now)
|
2014-11-29 05:59:39 +01:00
|
|
|
saveFileAs = new File(
|
|
|
|
workingDir.getCanonicalPath()
|
|
|
|
+ subdirectory
|
|
|
|
+ File.separator
|
|
|
|
+ getPrefix(index)
|
2017-04-27 06:13:11 +02:00
|
|
|
+ fileName
|
2014-11-29 05:59:39 +01:00
|
|
|
+ ".txt");
|
|
|
|
// Write the file
|
|
|
|
FileOutputStream out = (new FileOutputStream(saveFileAs));
|
|
|
|
out.write(text.getBytes());
|
|
|
|
out.close();
|
|
|
|
} catch (IOException e) {
|
|
|
|
logger.error("[!] Error creating save file path for description '" + url + "':", e);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
logger.debug("Downloading " + url + "'s description to " + saveFileAs);
|
|
|
|
if (!saveFileAs.getParentFile().exists()) {
|
|
|
|
logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
|
|
|
|
saveFileAs.getParentFile().mkdirs();
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2017-10-24 16:33:28 +02:00
|
|
|
protected String getPrefix(int index) {
|
2014-06-22 02:08:42 +02:00
|
|
|
String prefix = "";
|
|
|
|
if (keepSortOrder() && Utils.getConfigBoolean("download.save_order", true)) {
|
|
|
|
prefix = String.format("%03d_", index);
|
|
|
|
}
|
|
|
|
return prefix;
|
|
|
|
}
|
2015-10-15 21:27:34 +02:00
|
|
|
}
|