Merge branch 'formatting': Various code cleanup and formatting.

MetaPrime committed 2017-05-09 17:51:19 -07:00
commit dc2c100d9d
52 changed files with 745 additions and 788 deletions

View File

@@ -1,4 +1,3 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -8,24 +7,16 @@ import com.rarchives.ripme.utils.Utils;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
/**
*
* @author
*/
public class E621Ripper extends AbstractHTMLRipper {
public static final int POOL_IMAGES_PER_PAGE = 24;
@@ -177,5 +168,4 @@ public class E621Ripper extends AbstractHTMLRipper {
return Utils.filesystemSafe(prefix + getTerm(url));
}
}

View File

@@ -9,7 +9,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -18,10 +17,8 @@ import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.jsoup.Connection.Method;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
/**

View File

@@ -11,11 +11,8 @@ import java.util.regex.Pattern;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractJSONRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
public class InstagramRipper extends AbstractJSONRipper {

View File

@@ -2,24 +2,15 @@ package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.Utils;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class MyhentaicomicsRipper extends AbstractHTMLRipper {
public static boolean isTag;

View File

@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -13,7 +12,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
import com.rarchives.ripme.utils.Http;
public class NatalieMuRipper extends AbstractHTMLRipper {

View File

@@ -21,10 +21,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
/**
*
* @author
*/
public class PahealRipper extends AbstractHTMLRipper {
private static Map<String, String> cookies = null;
private static Pattern gidPattern = null;
@@ -59,9 +55,10 @@ public class PahealRipper extends AbstractHTMLRipper{
@Override
public Document getNextPage(Document page) throws IOException {
for (Element e : page.select("#paginator a")) {
if(e.text().toLowerCase().equals("next"))
if (e.text().toLowerCase().equals("next")) {
return Http.url(e.absUrl("href")).cookies(getCookies()).get();
}
}
return null;
}
@@ -71,8 +68,9 @@ public class PahealRipper extends AbstractHTMLRipper{
Elements elements = page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
List<String> res = new ArrayList<String>(elements.size());
for(Element e:elements)
for (Element e : elements) {
res.add(e.absUrl("href"));
}
return res;
}
@@ -89,7 +87,11 @@ public class PahealRipper extends AbstractHTMLRipper{
name = name.substring(0, name.length() - ext.length());
}
addURLToDownload(url,new File(workingDir.getCanonicalPath()+File.separator+Utils.filesystemSafe(new URI(name).getPath())+ext));
File outFile = new File(workingDir.getCanonicalPath()
+ File.separator
+ Utils.filesystemSafe(new URI(name).getPath())
+ ext);
addURLToDownload(url, outFile);
} catch (IOException ex) {
Logger.getLogger(PahealRipper.class.getName()).log(Level.SEVERE, null, ex);
} catch (URISyntaxException ex) {
@@ -98,12 +100,14 @@ public class PahealRipper extends AbstractHTMLRipper{
}
private String getTerm(URL url) throws MalformedURLException {
if(gidPattern==null)
if (gidPattern == null) {
gidPattern = Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
}
Matcher m = gidPattern.matcher(url.toExternalForm());
if(m.matches())
if (m.matches()) {
return m.group(2);
}
throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
}
@@ -118,5 +122,4 @@ public class PahealRipper extends AbstractHTMLRipper{
throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
}
}

View File

@@ -11,19 +11,18 @@ import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;
public class WordpressComicRipper extends AbstractHTMLRipper {
public WordpressComicRipper(URL url) throws IOException {
super(url);
}
public static List<String> explicit_domains = Arrays.asList("www.totempole666.com",
"buttsmithy.com", "themonsterunderthebed.net", "prismblush.com");
@Override
public String getHost() {
String host = url.toExternalForm().split("/")[2];
@@ -63,8 +62,8 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
if (prismblushMat.matches()) {
return true;
}
}
return false;
}
@@ -97,7 +96,6 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
return super.getAlbumTitle(url);
}
@Override
public String getGID(URL url) throws MalformedURLException {
String url_name = url.toExternalForm();
@@ -129,10 +127,10 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
}
nextPage = elem.attr("href");
}
if (nextPage == "") {
throw new IOException("No more pages");
}
else {
} else {
return Http.url(nextPage).get();
}
}
@@ -160,6 +158,4 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
public void downloadURL(URL url, int index) {
addURLToDownload(url, getPrefix(index));
}
}

View File

@@ -1,4 +1,3 @@
package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
@@ -18,10 +17,6 @@ import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
/**
*
* @author
*/
public class XbooruRipper extends AbstractHTMLRipper {
private static Pattern gidPattern = null;
@@ -53,8 +48,9 @@ public class XbooruRipper extends AbstractHTMLRipper{
int offset = Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
int num = Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));
if(offset+100>num)
if (offset + 100 > num) {
return null;
}
return Http.url(getPage(offset / 100 + 1)).get();
}
@@ -62,8 +58,9 @@ public class XbooruRipper extends AbstractHTMLRipper{
@Override
public List<String> getURLsFromPage(Document page) {
List<String> res = new ArrayList<String>(100);
for(Element e:page.getElementsByTag("post"))
for (Element e : page.getElementsByTag("post")) {
res.add(e.absUrl("file_url") + "#" + e.attr("id"));
}
return res;
}
@@ -73,12 +70,14 @@ public class XbooruRipper extends AbstractHTMLRipper{
}
private String getTerm(URL url) throws MalformedURLException {
if(gidPattern==null)
if (gidPattern == null) {
gidPattern = Pattern.compile("^https?://(www\\.)?xbooru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(\\&|(#.*)?$)");
}
Matcher m = gidPattern.matcher(url.toExternalForm());
if(m.matches())
if (m.matches()) {
return m.group(4);
}
throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
}
@@ -93,5 +92,4 @@ public class XbooruRipper extends AbstractHTMLRipper{
throw new MalformedURLException("Expected xbooru.com URL format: xbooru.com/index.php?tags=searchterm - got " + url + " instead");
}
}

View File

@@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -14,10 +13,8 @@ import java.util.regex.Pattern;
import org.jsoup.Connection.Response;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
import com.rarchives.ripme.utils.Http;
public class ZizkiRipper extends AbstractHTMLRipper {

View File

@@ -9,16 +9,20 @@ public class ChanSite {
public List<String> cdnDomains;
public ChanSite(List<String> Domains, List<String> CdnDomains) {
if(Domains.isEmpty())
if (Domains.isEmpty()) {
throw new IllegalArgumentException("Domains");
if(CdnDomains.isEmpty())
}
if (CdnDomains.isEmpty()) {
throw new IllegalArgumentException("CdnDomains");
}
domains = Domains;
cdnDomains = CdnDomains;
}
public ChanSite(List<String> Domains) {
if(Domains.isEmpty())
if (Domains.isEmpty()) {
throw new IllegalArgumentException("Domains");
}
domains = Domains;
cdnDomains = Domains;
}

View File

@@ -3,7 +3,6 @@ package com.rarchives.ripme.utils;
/**
* Base64 encoder/decoder
* From http://stackoverflow.com/a/4265472
*
*/
public class Base64 {
private final static char[] ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".toCharArray();

View File

@@ -23,7 +23,6 @@ import com.rarchives.ripme.ripper.rippers.VidbleRipper;
import com.rarchives.ripme.ripper.rippers.video.GfycatRipper;
import com.rarchives.ripme.ripper.rippers.EroShareRipper;
public class RipUtils {
private static final Logger logger = Logger.getLogger(RipUtils.class);
@@ -165,18 +164,6 @@ public class RipUtils {
if (url == null) url = urlFromSiteDirectoryName(dir, "vinebox", "http://finebox.co/u/", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "imgbox", "http://imgbox.com/g/", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "modelmayhem", "http://www.modelmayhem.com/", "");
/*
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
if (url == null) url = urlFromSiteDirectoryName(dir, "", "", "");
*/
//if (url == null) url = urlFromSiteDirectoryName(dir, "8muses", "http://www.8muses.com/index/category/", "");
return url;
}
@@ -249,8 +236,7 @@ public class RipUtils {
String album = fields.get(1);
String url = "http://";
if ((fields.contains("top") || fields.contains("new"))
&& (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))
) {
&& (fields.contains("year") || fields.contains("month") || fields.contains("week") || fields.contains("all"))) {
// Subreddit
fields.remove(0); // "imgur"
String sub = "";

View File

@@ -34,7 +34,6 @@ import com.rarchives.ripme.ripper.AbstractRipper;
* Common utility functions used in various places throughout the project.
*/
public class Utils {
public static final String RIP_DIRECTORY = "rips";
private static final String configFile = "rip.properties";
private static final Logger logger = Logger.getLogger(Utils.class);
@@ -180,7 +179,9 @@ public class Utils {
int nextParam = url.indexOf("&", paramIndex+1);
if (nextParam != -1) {
String c = "&";
if(wasFirstParam) c = "?";
if (wasFirstParam) {
c = "?";
}
url = url.substring(0, paramIndex) + c + url.substring(nextParam+1, url.length());
} else {
url = url.substring(0, paramIndex);