Some minor code cleanup

This commit is contained in:
cyian-1756 2018-04-19 11:35:36 -04:00
parent 1dadd26e05
commit be24df4d48
13 changed files with 12 additions and 28 deletions

View File

@ -55,14 +55,8 @@ public class BlackbrickroadofozRipper extends AbstractHTMLRipper {
throw new IOException("No more pages");
}
String nextPage = elem.attr("href");
// Some times this returns a empty string
// This for stops that
if (nextPage == "") {
throw new IOException("No more pages");
}
else {
return Http.url(nextPage).get();
}
return Http.url(nextPage).get();
}
@Override

View File

@ -59,7 +59,7 @@ public class CfakeRipper extends AbstractHTMLRipper {
String nextPage = elem.attr("href");
// Some times this returns a empty string
// This for stops that
if (nextPage == "") {
if (nextPage.equals("")) {
return null;
}
else {

View File

@ -4,7 +4,6 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

View File

@ -86,7 +86,7 @@ public class EroShareRipper extends AbstractHTMLRipper {
throw new IOException("No more pages");
}
nextUrl = elem.attr("href");
if (nextUrl == "") {
if (nextUrl.equals("")) {
throw new IOException("No more pages");
}
return Http.url("eroshae.com" + nextUrl).get();

View File

@ -13,7 +13,6 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.rarchives.ripme.utils.Utils;
import org.jsoup.Connection.Method;
import org.jsoup.Connection.Response;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
@ -23,7 +22,6 @@ import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.DownloadThreadPool;
import com.rarchives.ripme.utils.Base64;
import com.rarchives.ripme.utils.Http;
public class FuraffinityRipper extends AbstractHTMLRipper {
@ -162,10 +160,6 @@ public class FuraffinityRipper extends AbstractHTMLRipper {
if (!subdirectory.equals("")) {
subdirectory = File.separator + subdirectory;
}
int o = url.toString().lastIndexOf('/')-1;
String test = url.toString().substring(url.toString().lastIndexOf('/',o)+1);
test = test.replace("/",""); // This is probably not the best way to do this.
test = test.replace("\\",""); // CLOSE ENOUGH!
saveFileAs = new File(
workingDir.getCanonicalPath()
+ subdirectory

View File

@ -50,7 +50,7 @@ public class LusciousRipper extends AbstractHTMLRipper {
// This is here for pages with mp4s instead of images
String video_image = "";
video_image = page.select("div > video > source").attr("src");
if (video_image != "") {
if (!video_image.equals("")) {
urls.add(video_image);
}
return urls;

View File

@ -73,7 +73,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
if (m.matches()) {
nextUrl = "http://myhentaicomics.com" + m.group(0);
}
if (nextUrl == "") {
if (nextUrl.equals("")) {
throw new IOException("No more pages");
}
// Sleep for half a sec to avoid getting IP banned
@ -100,7 +100,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
Element elem = nextAlbumPage.select("a.ui-icon-right").first();
String nextPage = elem.attr("href");
pageNumber = pageNumber + 1;
if (nextPage == "") {
if (nextPage.equals("")) {
logger.info("Got " + pageNumber + " pages");
break;
}
@ -220,7 +220,7 @@ public class MyhentaicomicsRipper extends AbstractHTMLRipper {
// If true the page is a page of albums
if (doc.toString().contains("class=\"g-item g-album\"")) {
// This if checks that there is more than 1 page
if (doc.select("a.ui-icon-right").last().attr("href") != "") {
if (!doc.select("a.ui-icon-right").last().attr("href").equals("")) {
// There is more than one page so we call getListOfPages
List<String> pagesToRip = getListOfPages(doc);
logger.debug("Pages to rip = " + pagesToRip);

View File

@ -57,7 +57,7 @@ public class SinfestRipper extends AbstractHTMLRipper {
String nextPage = elem.parent().attr("href");
// Some times this returns a empty string
// This for stops that
if (nextPage == "") {
if (nextPage.equals("")) {
return null;
}
else {

View File

@ -4,14 +4,12 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

View File

@ -247,7 +247,7 @@ public class WordpressComicRipper extends AbstractHTMLRipper {
nextPage = elem.attr("href");
}
if (nextPage == "") {
if (nextPage.equals("")) {
throw new IOException("No more pages");
} else {
return Http.url(nextPage).get();

View File

@ -72,7 +72,7 @@ public class XhamsterRipper extends AbstractHTMLRipper {
@Override
public Document getNextPage(Document doc) throws IOException {
if (doc.select("a.next").first().attr("href") != "") {
if (!doc.select("a.next").first().attr("href").equals("")) {
return Http.url(doc.select("a.next").first().attr("href")).get();
} else {
throw new IOException("No more pages");

View File

@ -3,7 +3,6 @@ package com.rarchives.ripme.ripper.rippers.video;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

View File

@ -3,7 +3,7 @@ package com.rarchives.ripme.tst.ripper.rippers;
import java.io.IOException;
import java.net.URL;
import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;;
import com.rarchives.ripme.ripper.rippers.AerisdiesRipper;
public class AerisdiesRipperTest extends RippersTest {
public void testAlbum() throws IOException {