package com.rarchives.ripme.ripper.rippers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.json.JSONArray;
import org.json.JSONObject;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.utils.Http;

/**
 * Value holder for one Tapastic episode parsed out of the page's embedded
 * "var _data" JavaScript blob.  The constructor derives a filesystem-safe
 * filename from the episode title by stripping every character that is
 * illegal in Windows filenames.
 */
class TapasticEpisode {
    // Position of the episode within the series' episodeList JSON array.
    int index;
    // Numeric episode id taken from the JSON.
    int id;
    // Raw episode title as it appears in the JSON.
    String title;
    // Title with Windows-illegal characters removed; used when saving files.
    String filename;

    public TapasticEpisode(int index, int id, String title) {
        this.index=index;
        this.id=id;
        this.title=title;
        this.filename=title // Windows filenames may not contain any of these...
            .replace("\\", "")
            .replace("/", "")
            .replace(":", "")
            .replace("*", "")
            .replace("?", "")
            .replace("\"", "")
            .replace("<", "")
            .replace(">", "")
            .replace("|", "");
    }
}

/**
 * Ripper for tapastic.com.  Episode metadata is not served as crawlable
 * HTML links; instead it is embedded in a script block that assigns
 * "var _data", whose "episodeList : [...]" property is a JSON array that
 * {@link #getURLsFromPage(Document)} extracts line by line.
 *
 * NOTE(review): this chunk of the file is truncated mid-method (it ends at
 * "for(int i=0;i" inside getURLsFromPage); the remainder of that loop and
 * any further members are not visible here.
 */
public class TapasticRipper extends AbstractHTMLRipper {

    // Episodes collected while parsing the page.
    // NOTE(review): raw type -- presumably List<TapasticEpisode>, but the
    // code that populates/consumes it lies past the visible end of this
    // chunk; confirm before tightening the generic type.
    private List episodes=new ArrayList();

    public TapasticRipper(URL url) throws IOException {
        super(url);
    }

    @Override
    public String getDomain() {
        return "tapastic.com";
    }

    @Override
    public String getHost() {
        return "tapastic";
    }

    @Override
    public Document getFirstPage() throws IOException {
        // Single fetch of the series page; all episode data is inline in it.
        return Http.url(url).get();
    }

    @Override
    public List getURLsFromPage(Document page) {
        // NOTE(review): raw List -- presumably List<String> of episode URLs,
        // matching the AbstractHTMLRipper contract; verify against the base
        // class (declared outside this chunk).
        List urls = new ArrayList();
        Elements scripts=page.select("script");
        for(Element script: scripts) {
            String text=script.data();
            // Only the script block that defines "var _data" carries the
            // episode JSON; skip all other scripts.
            if(text.contains("var _data")) {
                String[] lines=text.split("\n");
                for(String line:lines) {
                    String trimmed=line.trim();
                    // The episode array appears as a JS object property
                    // formatted exactly as:  episodeList : [ {...}, ... ]
                    // The prefix is chopped off so the remainder parses as
                    // a JSON array.
                    if(trimmed.startsWith("episodeList : ")) {
                        JSONArray json_episodes=new JSONArray(trimmed.substring("episodeList : ".length()));
                        // NOTE(review): SOURCE is truncated here, mid
                        // for-loop header; the loop body that walks
                        // json_episodes is not visible in this chunk.
                        for(int i=0;i