2 changed files with 305 additions and 0 deletions
@@ -0,0 +1,212 @@
import Base64 from "crypto-js/enc-base64";
import Utf8 from "crypto-js/enc-utf8";

import { MWEmbedType } from "@/backend/helpers/embed";
import { proxiedFetch } from "@/backend/helpers/fetch";
import { registerEmbedScraper } from "@/backend/helpers/register";
import {
  MWCaptionType,
  MWStreamQuality,
  MWStreamType,
} from "@/backend/helpers/streams";

// Preferred quality order, best first
const qualityOrder = [
  MWStreamQuality.Q1080P,
  MWStreamQuality.Q720P,
  MWStreamQuality.Q480P,
  MWStreamQuality.Q360P,
];

async function fetchCaptchaToken(domain: string, recaptchaKey: string) {
  // The co parameter is the origin Base64-encoded with "=" padding replaced by "."
  const domainHash = Base64.stringify(Utf8.parse(domain)).replace(/=/g, ".");

  // Load the reCAPTCHA bootstrap script to read the current release version (vToken)
  const recaptchaRender = await proxiedFetch<any>(
    `https://www.google.com/recaptcha/api.js?render=${recaptchaKey}`
  );

  const vToken = recaptchaRender.substring(
    recaptchaRender.indexOf("/releases/") + 10,
    recaptchaRender.indexOf("/recaptcha__en.js")
  );

  // The invisible-captcha anchor page embeds the cToken needed for the reload call
  const recaptchaAnchor = await proxiedFetch<any>(
    `https://www.google.com/recaptcha/api2/anchor?ar=1&hl=en&size=invisible&cb=flicklax&k=${recaptchaKey}&co=${domainHash}&v=${vToken}`
  );

  const cToken = new DOMParser()
    .parseFromString(recaptchaAnchor, "text/html")
    .getElementById("recaptcha-token")
    ?.getAttribute("value");

  if (!cToken) throw new Error("Unable to find cToken");

  const payload = {
    v: vToken,
    reason: "q",
    k: recaptchaKey,
    c: cToken,
    sa: "",
    co: domain,
  };

  // Exchange the anchor token for a response token via the reload endpoint
  const tokenData = await proxiedFetch<string>(
    `https://www.google.com/recaptcha/api2/reload?${new URLSearchParams(
      payload
    ).toString()}`,
    {
      headers: { referer: "https://www.google.com/recaptcha/api2/" },
      method: "POST",
    }
  );

  // The response token is returned inside the "rresp" field of the reload response
  const token = tokenData.match('rresp","(.+?)"');
  return token ? token[1] : null;
}

registerEmbedScraper({
  id: "streamsb",
  displayName: "StreamSB",
  for: MWEmbedType.STREAMSB,
  rank: 150,
  async getStream({ url, progress }) {
    /* Url variations
    - domain.com/{id}?.html
    - domain.com/{id}
    - domain.com/embed-{id}
    - domain.com/d/{id}
    - domain.com/e/{id}
    - domain.com/e/{id}-embed
    */
    // Normalize all known url formats down to domain.com/{id}
    const streamsbUrl = url
      .replace(".html", "")
      .replace("embed-", "")
      .replace("e/", "")
      .replace("d/", "");

    // The /d/{id} page lists the available download qualities
    const parsedUrl = new URL(streamsbUrl);
    const base = await proxiedFetch<any>(
      `${parsedUrl.origin}/d${parsedUrl.pathname}`
    );

    progress(20);

    // Parse captions from url
    const captionUrl = parsedUrl.searchParams.get("caption_1");
    const captionLang = parsedUrl.searchParams.get("sub_1");

    const basePage = new DOMParser().parseFromString(base, "text/html");

    // Each quality is exposed through a download_video('<id>','<mode>','<hash>') onclick handler
    const downloadVideoFunctions = basePage.querySelectorAll(
      "[onclick^=download_video]"
    );

    const dlDetails = [];
    for (const func of downloadVideoFunctions) {
      const funcContents = func.getAttribute("onclick");
      const regExpFunc = /download_video\('(.+?)','(.+?)','(.+?)'\)/;
      const matchesFunc = regExpFunc.exec(funcContents ?? "");
      if (matchesFunc !== null) {
        // The nested <span> holds text of the form "<label> (<size>)"
        const quality = func.querySelector("span")?.textContent;
        const regExpQuality = /(.+?) \((.+?)\)/;
        const matchesQuality = regExpQuality.exec(quality ?? "");
        if (matchesQuality !== null) {
          dlDetails.push({
            parameters: [matchesFunc[1], matchesFunc[2], matchesFunc[3]],
            quality: {
              label: matchesQuality[1].trim(),
              size: matchesQuality[2],
            },
          });
        }
      }
    }

    progress(40);

    // Resolve every quality to a direct download link; each one requires solving the invisible captcha
    let dls = await Promise.all(
      dlDetails.map(async (dl) => {
        const getDownload = await proxiedFetch<any>(
          `/dl?op=download_orig&id=${dl.parameters[0]}&mode=${dl.parameters[1]}&hash=${dl.parameters[2]}`,
          {
            baseURL: parsedUrl.origin,
          }
        );

        const downloadPage = new DOMParser().parseFromString(
          getDownload,
          "text/html"
        );

        const recaptchaKey = downloadPage
          .querySelector(".g-recaptcha")
          ?.getAttribute("data-sitekey");
        if (!recaptchaKey) throw new Error("Unable to get captcha key");

        const captchaToken = await fetchCaptchaToken(
          parsedUrl.origin,
          recaptchaKey
        );
        if (!captchaToken) throw new Error("Unable to get captcha token");

        const dlForm = new FormData();
        dlForm.append("op", "download_orig");
        dlForm.append("id", dl.parameters[0]);
        dlForm.append("mode", dl.parameters[1]);
        dlForm.append("hash", dl.parameters[2]);
        dlForm.append("g-recaptcha-response", captchaToken);

        const download = await proxiedFetch<any>(
          `/dl?op=download_orig&id=${dl.parameters[0]}&mode=${dl.parameters[1]}&hash=${dl.parameters[2]}`,
          {
            baseURL: parsedUrl.origin,
            method: "POST",
            body: dlForm,
          }
        );

        // The direct link sits on the download button of the returned page
        const dlLink = new DOMParser()
          .parseFromString(download, "text/html")
          .querySelector(".btn.btn-light.btn-lg")
          ?.getAttribute("href");

        console.log(dlLink);

        return {
          quality: dl.quality.label as MWStreamQuality,
          url: dlLink,
          size: dl.quality.size,
          captions:
            captionUrl && captionLang
              ? [
                  {
                    url: captionUrl,
                    langIso: captionLang,
                    type: MWCaptionType.VTT,
                  },
                ]
              : [],
        };
      })
    );
    dls = dls.filter((d) => !!d.url);
    dls = dls.sort((a, b) => {
      const aQuality = qualityOrder.indexOf(a.quality);
      const bQuality = qualityOrder.indexOf(b.quality);
      return aQuality - bQuality;
    });

    progress(60);

    // TODO: Quality selection for embed scrapers
    const dl = dls[0];
    if (!dl.url) throw new Error("No stream url found");

    return {
      embedId: MWEmbedType.STREAMSB,
      streamUrl: dl.url,
      quality: dl.quality,
      captions: dl.captions,
      type: MWStreamType.MP4,
    };
  },
});
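The least obvious detail in the scraper above is the co value built in fetchCaptchaToken: it is simply the embed origin Base64-encoded with the "=" padding swapped for ".". A minimal, self-contained sketch of just that encoding step, using the same crypto-js helpers (the origin below is a placeholder, not a real StreamSB host):

import Base64 from "crypto-js/enc-base64";
import Utf8 from "crypto-js/enc-utf8";

// Placeholder origin; in the scraper this is parsedUrl.origin
const origin = "https://sbplay.example";
const domainHash = Base64.stringify(Utf8.parse(origin)).replace(/=/g, ".");
console.log(domainHash); // Base64 of the origin, with "=" padding replaced by "."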
@@ -0,0 +1,93 @@
import { AES, enc } from "crypto-js";

import { MWEmbedType } from "@/backend/helpers/embed";
import { registerEmbedScraper } from "@/backend/helpers/register";
import {
  MWCaptionType,
  MWStreamQuality,
  MWStreamType,
} from "@/backend/helpers/streams";

import { proxiedFetch } from "../helpers/fetch";

interface StreamRes {
  server: number;
  sources: string;
  tracks: {
    file: string;
    kind: "captions" | "thumbnails";
    label: string;
  }[];
}

function isJSON(json: string) {
  try {
    JSON.parse(json);
    return true;
  } catch {
    return false;
  }
}

registerEmbedScraper({
  id: "upcloud",
  displayName: "UpCloud",
  for: MWEmbedType.UPCLOUD,
  rank: 200,
  async getStream({ url }) {
    // Example url: https://dokicloud.one/embed-4/{id}?z=
    const parsedUrl = new URL(url.replace("embed-5", "embed-4"));

    const dataPath = parsedUrl.pathname.split("/");
    const dataId = dataPath[dataPath.length - 1];

    // getSources returns the (possibly encrypted) source list plus caption/thumbnail tracks
    const streamRes = await proxiedFetch<StreamRes>(
      `${parsedUrl.origin}/ajax/embed-4/getSources?id=${dataId}`,
      {
        headers: {
          Referer: parsedUrl.origin,
          "X-Requested-With": "XMLHttpRequest",
        },
      }
    );

    let sources:
      | {
          file: string;
          type: string;
        }
      | string = streamRes.sources;

    if (!isJSON(sources) || typeof sources === "string") {
      // sources is AES-encrypted; the shared key is published in the enimax-anime/key repo
      const decryptionKey = await proxiedFetch<string>(
        `https://raw.githubusercontent.com/enimax-anime/key/e4/key.txt`
      );

      const decryptedStream = AES.decrypt(sources, decryptionKey).toString(
        enc.Utf8
      );

      // The decrypted payload is a JSON array of sources; only the first entry is used
      const parsedStream = JSON.parse(decryptedStream)[0];
      if (!parsedStream) throw new Error("No stream found");
      sources = parsedStream as { file: string; type: string };
    }

    return {
      embedId: MWEmbedType.UPCLOUD,
      streamUrl: sources.file,
      quality: MWStreamQuality.Q1080P,
      type: MWStreamType.HLS,
      captions: streamRes.tracks
        .filter((sub) => sub.kind === "captions")
        .map((sub) => {
          return {
            langIso: sub.label,
            url: sub.file,
            type: sub.file.endsWith("vtt")
              ? MWCaptionType.VTT
              : MWCaptionType.UNKNOWN,
          };
        }),
    };
  },
});
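For reference, the decryption branch above is plain crypto-js AES in passphrase mode: the fetched key.txt content acts as the password, and the decrypted UTF-8 text is parsed as a JSON array of { file, type } entries. A small round-trip sketch with made-up values, just to show the mechanism (not the real key or payload):

import { AES, enc } from "crypto-js";

// Made-up stand-ins for key.txt and streamRes.sources
const key = "example-key";
const plaintext = JSON.stringify([
  { file: "https://cdn.example/playlist.m3u8", type: "hls" },
]);

// Encrypt then decrypt to mirror what the scraper does with the real ciphertext
const ciphertext = AES.encrypt(plaintext, key).toString();
const decrypted = AES.decrypt(ciphertext, key).toString(enc.Utf8);

const [firstSource] = JSON.parse(decrypted) as { file: string; type: string }[];
console.log(firstSource.file); // https://cdn.example/playlist.m3u8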