package com.rarchives.ripme.ripper;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;

import org.jsoup.nodes.Document;

import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Utils;

/**
* Simplified ripper, designed for ripping from sites by parsing HTML.
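 *
 * Subclasses supply the site-specific pieces (domain, page fetching, link
 * extraction, per-URL download) and {@link #rip()} drives the page-by-page crawl.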
 */
public abstract class AbstractHTMLRipper extends AlbumRipper {

    public AbstractHTMLRipper(URL url) throws IOException {
        super(url);
}
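
    /** Domain this ripper accepts; {@link #canRip(URL)} matches hosts ending with it. */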
public abstract String getDomain();
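    /** Short host name; used to label the ripper and name its output folder. */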
public abstract String getHost();
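
    /** Fetches and returns the first page of the album. */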
public abstract Document getFirstPage() throws IOException;
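    /**
     * Returns the page after {@code doc}; the default returns null, which ends
     * the crawl after the first page. Paginated rippers override this.
     */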
    public Document getNextPage(Document doc) throws IOException {
        return null;
}
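
    /** Extracts the URLs of all media items to download from the given page. */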
public abstract List<String> getURLsFromPage(Document page);

    /**
     * Extracts links to description pages from {@code doc}; only called when
     * {@link #hasDescriptionSupport()} returns true, so the default just throws.
     */
    public List<String> getDescriptionsFromPage(Document doc) throws IOException {
        throw new IOException("getDescriptionsFromPage not implemented");
    }

    /** Downloads (or queues for download) a single media URL from the page. */
public abstract void downloadURL(URL url, int index);
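    /**
     * Rippers that download on worker threads return their pool here so
     * {@link #rip()} can wait for it; null (the default) means no pool is used.
     */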
    public DownloadThreadPool getThreadPool() {
        return null;
    }

    public boolean keepSortOrder() {
        return true;
    }

    @Override
    public boolean canRip(URL url) {
        return url.getHost().endsWith(getDomain());
    }

    @Override
    public URL sanitizeURL(URL url) throws MalformedURLException {
        return url;
    }

    /** Whether this ripper can also rip text descriptions alongside images. */
    public boolean hasDescriptionSupport() {
        return false;
    }

    /**
     * Fetches the description text behind one description link; only called when
     * {@link #hasDescriptionSupport()} returns true, so the default just throws.
     */
    public String getDescription(String page) throws IOException {
        throw new IOException("getDescription not implemented");
}
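
    /** Milliseconds to sleep between description fetches; override to rate-limit. */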
    public int descSleepTime() {
        return 0;
    }

    @Override
    public void rip() throws IOException {
        int index = 0;
        int textindex = 0;
        logger.info("Retrieving " + this.url);
        sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
Document doc = getFirstPage();
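
        // Crawl page by page until getNextPage() returns null or throws, or the rip is stopped.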
        while (doc != null) {
            List<String> imageURLs = getURLsFromPage(doc);
            // In test mode, keep only the first image so tests finish quickly.
            if (isThisATest()) {
                while (imageURLs.size() > 1) {
                    imageURLs.remove(1);
                }
            }

            if (imageURLs.isEmpty()) {
                throw new IOException("No images found at " + doc.location());
            }

            for (String imageURL : imageURLs) {
                index += 1;
                logger.debug("Found image url #" + index + ": " + imageURL);
                downloadURL(new URL(imageURL), index);
                if (isStopped()) {
                    break;
                }
            }
if (hasDescriptionSupport()) {
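                // Each description link is fetched separately and saved as a .txt file.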
                logger.debug("Fetching description(s) from " + doc.location());
                List<String> textURLs = getDescriptionsFromPage(doc);
                if (!textURLs.isEmpty()) {
                    logger.debug("Found description link(s) at " + doc.location());
                    for (String textURL : textURLs) {
                        if (isStopped()) {
                            break;
                        }
                        textindex += 1;
                        logger.debug("Getting description from " + textURL);
                        sleep(descSleepTime());
                        String tempDesc = getDescription(textURL);
                        if (tempDesc != null) {
                            logger.debug("Got description: " + tempDesc);
                            saveText(new URL(textURL), "", tempDesc, textindex);
                        }
                    }
                }
            }

            if (isStopped() || isThisATest()) {
                break;
            }

            try {
                sendUpdate(STATUS.LOADING_RESOURCE, "next page");
                doc = getNextPage(doc);
            } catch (IOException e) {
                logger.info("Can't get next page: " + e.getMessage());
                break;
            }
        }

        // If they're using a thread pool, wait for it.
        if (getThreadPool() != null) {
            logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
            getThreadPool().waitForThreads();
        }
        waitForThreads();
    }
public boolean saveText(URL url, String subdirectory, String text, int index) {
        // Default implementation; rippers with special needs (e.g. FurAffinity) override this.
        try {
            stopCheck();
        } catch (IOException e) {
            return false;
        }
        String saveAs = url.toExternalForm();
        saveAs = saveAs.substring(saveAs.lastIndexOf('/') + 1);
        if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
        if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
        if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
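        // saveAs is now the bare filename: anything after '?', '#', '&', or ':' has been stripped.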
        File saveFileAs;
        try {
            if (!subdirectory.equals("")) {
                subdirectory = File.separator + subdirectory;
            }
            // Build the destination path: workingDir/subdirectory/prefix + filename + ".txt"
saveFileAs = new File(
                workingDir.getCanonicalPath()
                + subdirectory
                + File.separator
                + getPrefix(index)
                + saveAs
                + ".txt");
            // Make sure the destination directory exists before writing.
            logger.debug("Saving " + url + "'s description to " + saveFileAs);
            if (!saveFileAs.getParentFile().exists()) {
                logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
                saveFileAs.getParentFile().mkdirs();
            }
            // Write the description text.
            FileOutputStream out = new FileOutputStream(saveFileAs);
            out.write(text.getBytes());
            out.close();
        } catch (IOException e) {
            logger.error("[!] Error creating save file path for description '" + url + "':", e);
            return false;
        }
        return true;
    }

    public String getPrefix(int index) {
        String prefix = "";
if (keepSortOrder() && Utils.getConfigBoolean("download.save_order", true)) {
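            // e.g. index 3 -> "003_", so downloads sort in rip order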
            prefix = String.format("%03d_", index);
        }
        return prefix;
    }
}
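
/*
 * A minimal, hypothetical subclass sketch (the site, selector, and GID are
 * invented for illustration; fetching is shown with plain jsoup):
 *
 *     public class ExampleRipper extends AbstractHTMLRipper {
 *         public ExampleRipper(URL url) throws IOException { super(url); }
 *         public String getHost()   { return "example"; }
 *         public String getDomain() { return "example.com"; }
 *         public String getGID(URL url) throws MalformedURLException {
 *             return "some-album-id"; // normally parsed out of the URL
 *         }
 *         public Document getFirstPage() throws IOException {
 *             return Jsoup.connect(url.toExternalForm()).get();
 *         }
 *         public List<String> getURLsFromPage(Document page) {
 *             List<String> urls = new ArrayList<String>();
 *             for (Element img : page.select("img.album-image")) {
 *                 urls.add(img.attr("abs:src"));
 *             }
 *             return urls;
 *         }
 *         public void downloadURL(URL url, int index) {
 *             addURLToDownload(url, getPrefix(index));
 *         }
 *     }
 */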