mirror of https://git.luna-app.eu/50n50/sources (synced 2025-12-21 21:26:19 +01:00)
956 lines · 32 KiB · JavaScript

async function searchResults(keyword) {
    const results = [];
    const response = await soraFetch(`https://www.animetoast.cc/?s=${keyword}`);
    const html = await response.text();

    const regex = /<a href="(https:\/\/www\.animetoast\.cc\/[^"]+)"[^>]*title="([^"]+)"[^>]*>[\s\S]*?<img[^>]*src="([^"]+)"[^>]*>[\s\S]*?<\/a>/g;

    let match;
    while ((match = regex.exec(html)) !== null) {
        let title = match[2].trim();

        // If the title contains "Ger Dub", "Ger Sub", "Eng Dub" or "Eng Sub",
        // strip that marker and prepend a short language tag to the title.
        if (title.includes("Ger Dub") || title.includes("Ger Sub") || title.includes("Eng Dub") || title.includes("Eng Sub")) {
            let lang = '';
            if (title.includes("Ger Dub")) {
                lang = 'DUB';
            } else if (title.includes("Ger Sub")) {
                lang = 'SUB';
            } else if (title.includes("Eng Dub")) {
                lang = 'EN-DUB';
            } else if (title.includes("Eng Sub")) {
                lang = 'EN-SUB';
            }
            title = `${lang} ${title.replace(/(Ger Dub|Ger Sub|Eng Dub|Eng Sub)/, '').trim()}`;
        }

        results.push({
            title: title,
            image: match[3].trim(),
            href: match[1].trim()
        });
    }
    return JSON.stringify(results);
}
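
/*
 * Illustrative only: with a hypothetical query, searchResults() resolves to a
 * JSON string shaped roughly like the following (title, image URL and page URL
 * below are made-up examples):
 *
 * [
 *   {
 *     "title": "DUB Some Anime",
 *     "image": "https://www.animetoast.cc/wp-content/uploads/cover.jpg",
 *     "href": "https://www.animetoast.cc/some-anime-ger-dub/"
 *   }
 * ]
 */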

async function extractDetails(url) {
    const results = [];
    const response = await soraFetch(url);
    const html = await response.text();

    let description = '';
    const descriptionRegex = /<p>(?:<img[^>]*>)?(.*?)<\/p>/s;
    const descriptionMatch = html.match(descriptionRegex);

    if (descriptionMatch && descriptionMatch[1]) {
        description = descriptionMatch[1].trim();
    }

    results.push({
        description: description,
        aliases: 'N/A',
        airdate: 'N/A'
    });

    return JSON.stringify(results);
}

async function extractEpisodes(url) {
    const results = [];
    const response = await soraFetch(url);
    const html = await response.text();

    let episodes = [];
    try {
        episodes = await extractEpisodeHosts(html, url);
    } catch (error) {
        sendLog("Error extracting episodes: " + error.message);
        return JSON.stringify([{ error: "Failed to extract episodes" }]);
    }

    sendLog(JSON.stringify(episodes));
    if (episodes.length === 0) {
        sendLog("No episodes found");
        return JSON.stringify([{ error: "No episodes found" }]);
    }

    let count = 0;
    for (const episodeUrl of episodes) {
        count++;
        results.push({
            href: episodeUrl,
            number: count
        });
    }
    sendLog("Extracted " + count + " episodes");
    return JSON.stringify(results);
}
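
/*
 * Illustrative only: each episode entry points back to the title page with a
 * hash fragment that encodes one link index per host (built by
 * extractEpisodeHosts below), e.g.:
 *
 * [
 *   { "href": "https://www.animetoast.cc/xxx/#voe=0,playn=24,mp4upload=48", "number": 1 },
 *   { "href": "https://www.animetoast.cc/xxx/#voe=1,playn=25,mp4upload=49", "number": 2 }
 * ]
 */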

async function extractEpisodeHosts(html, url) {
    // Host tabs look like:
    // <li class="active">
    //   <a data-toggle="tab" href="#multi_link_tab0">Voe</a>
    // </li>
    const results = {};
    const tabRegex = /<a[^>]*data-toggle=["']tab["'][^>]*href=["']([^"']+)["'][^>]*>(.*?)<\/a>/g;

    const tabMatches = [...html.matchAll(tabRegex)];
    sendLog("Tab matches: " + JSON.stringify(tabMatches));
    if (tabMatches.length === 0) {
        sendLog("No tab matches found");
        return results; // Return empty object if no tabs found
    }

    if (!tabMatches[0]) {
        sendLog("No tab match found");
        return results; // Return empty object if no tab match found
    }

    for (const match of tabMatches) {
        const tabHref = match[1].trim();
        sendLog("Tab Href: " + tabHref);
        const tabId = tabHref.startsWith('#') ? tabHref.substring(1) : tabHref;
        sendLog("Tab ID: " + tabId);
        const provider = match[2].trim().toLowerCase();

        // Capture the full tab panel ID (e.g. "multi_link_tab0"),
        // not just the number after "multi_link_tab".
        const divRegex = /<div id="(multi_link_tab[^"]+)"[^>]*>([\s\S]*?)<\/div>/gs;
        const divMatch = [...html.matchAll(divRegex)];
        // sendLog("Div matches: " + JSON.stringify(divMatch));

        // Find the matching div by comparing the full ID
        const matchingDiv = divMatch.filter(div => div[1] === tabId);
        // sendLog("Matching Div: " + JSON.stringify(matchingDiv));

        if (!matchingDiv || matchingDiv.length === 0) {
            sendLog("No div match found for tab ID: " + tabId);
            continue; // Skip if no matching div found
        }

        // Episode links look like https://www.animetoast.cc/xxx/?link=0
        const epRegex = /<a[^>]*href=["']([^"']+)["'][^>]*>[\s\S]*?Ep\.\s*(\d+)\s*<\/a>/g;
        const epMatches = [...matchingDiv[0][2].matchAll(epRegex)];

        if (!results[provider]) {
            results[provider] = [];
        }
        results[provider].push(...epMatches.map(match => {
            const url = match[1];
            const linkMatch = url.match(/[?&]link=(\d+)/);
            return linkMatch ? linkMatch[1] : null;
        }).filter(Boolean));
        sendLog(`Extracted ${epMatches.length} episodes for provider ${provider}`);
    }

    let newResults = [];
    // Build new URLs out of results like this:
    /*
        https://www.animetoast.cc/xxx/#voe=0,doodstream=12,playn=24,fmoon=36,mp4upload=48
        https://www.animetoast.cc/xxx/#voe=1,doodstream=13,playn=25,fmoon=37,mp4upload=49
        ...
    */
    // Loop through results and build the new URLs
    const maxLength = Math.max(...Object.values(results).map(arr => arr.length));
    for (let i = 0; i < maxLength; i++) {
        let newUrl = url.split('#')[0] + '#';
        for (const [provider, links] of Object.entries(results)) {
            if (links[i]) {
                newUrl += `${provider}=${links[i]},`;
            }
        }
        newUrl = newUrl.slice(0, -1); // Remove trailing comma
        newResults.push(newUrl);
    }

    return newResults;
}
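
/*
 * Illustrative only (assumed data): if the host tabs yield
 *   { voe: ["0", "1"], mp4upload: ["48", "49"] }
 * then, for a title page at https://www.animetoast.cc/xxx/, the function
 * returns one hash URL per episode:
 *   [
 *     "https://www.animetoast.cc/xxx/#voe=0,mp4upload=48",
 *     "https://www.animetoast.cc/xxx/#voe=1,mp4upload=49"
 *   ]
 */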

async function extractStreamUrl(url) {
    try {
        // Extract the providers from the URL hash, e.g.
        // https://www.animetoast.cc/sword-art-online-alternative-gun-gale-online-ii-ger-dub/#voe=2,doodstream=14,playn=26,fmoon=38,mp4upload=50
        const baseUrl = url.split('#')[0];
        const providersString = url.split('#')[1];
        if (!providersString) {
            sendLog("No providers found in URL: " + url);
            return JSON.stringify([{ provider: "Error", link: "No providers found in URL" }]);
        }
        sendLog("Base URL: " + baseUrl);
        sendLog("Providers String: " + providersString);
        const providersArray = providersString.split(',');
        sendLog("Providers Array: " + JSON.stringify(providersArray));

        // Create a providers object from the providersArray
        let tempProviders = {};
        providersArray.forEach(provider => {
            const [name, id] = provider.split('=');
            tempProviders[name] = id;
        });
        if (!_0xCheck()) return 'https://files.catbox.moe/avolvc.mp4';

        // Rename fmoon to filemoon
        if (tempProviders['fmoon']) {
            tempProviders['filemoon'] = tempProviders['fmoon'];
            delete tempProviders['fmoon'];
        }

        if (tempProviders['doodstream']) {
            delete tempProviders['doodstream']; // Unclear why, but it crashes the app
        }

        // Remove any providers that have no matching extractor
        for (const provider in tempProviders) {
            if (eval(`typeof ${provider}Extractor`) !== "function") {
                sendLog(`Extractor for provider ${provider} is not defined, removing...`);
                delete tempProviders[provider];
            }
        }

        let providers = await extractProviders(tempProviders, baseUrl);
        sendLog("Extracted Providers: " + JSON.stringify(providers));
        if (Object.keys(providers).length === 0) {
            sendLog("No valid providers found, returning error");
            return JSON.stringify([{ provider: "Error", link: "No valid providers found" }]);
        }

        // Multiple extractor (recommended)
        let streams = [];
        try {
            streams = await multiExtractor(providers);
            let returnedStreams = {
                streams: streams,
            };

            sendLog("Multi extractor streams: " + JSON.stringify(returnedStreams));
            return JSON.stringify(returnedStreams);
        } catch (error) {
            sendLog("Multi extractor error: " + error);
            return JSON.stringify([{ provider: "Error2", link: "" }]);
        }

    } catch (error) {
        sendLog("Fetch error: " + error);
        return null;
    }
}
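
/*
 * Illustrative only: on success extractStreamUrl() resolves to a JSON string
 * wrapping the flat array produced by multiExtractor() (alternating provider
 * names and stream URLs; see its scheme comment further down), e.g.
 *   {"streams":["Voe","https://voe.example/stream.m3u8","Mp4upload","https://mp4upload.example/video.mp4"]}
 * The hosts and URLs in this example are made up.
 */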

async function extractProviders(tempProviders, baseUrl) {
    let providers = {};
    for (const [name, id] of Object.entries(tempProviders)) {
        try {
            const response = await fetch(`${baseUrl}?link=${id}`);
            const data = response.text ? await response.text() : response;
            // Get the iframe src from the data
            const iframeRegex = /<iframe[^>]+src="([^"]+)"[^>]*>/;
            const iframeMatch = data.match(iframeRegex);
            if (iframeMatch && iframeMatch[1]) {
                const iframeSrc = iframeMatch[1];
                // Check if the iframeSrc is a valid URL
                if (iframeSrc.startsWith("http") || iframeSrc.startsWith("https")) {
                    providers[iframeSrc] = name; // Keyed by the embed URL, with the provider name as value
                    sendLog(`Provider ${name} found: ${iframeSrc}`);
                }
            } else {
                // Otherwise get the href from the player-embed div:
                /* <div id="player-embed" >
                       <a href="https://voe.sx/" target="_blank">
                */
                const divRegex = /<div id="player-embed"[^>]*>\s*<a href="([^"]+)"[^>]*>/;
                const divMatch = data.match(divRegex);
                if (divMatch && divMatch[1]) {
                    const href = divMatch[1];
                    // Check if the href is a valid URL
                    if (href.startsWith("http") || href.startsWith("https")) {
                        providers[href] = name; // Keyed by the embed URL, with the provider name as value
                        sendLog(`Provider ${name} found: ${href}`);
                    }
                } else {
                    sendLog(`No iframe or div found for provider ${name}, skipping...`);
                    continue; // Skip if no iframe or div found
                }
            }
        } catch (error) {
            sendLog("Error fetching provider " + name + ": " + error);
        }
    }
    return providers;
}
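
/*
 * Illustrative only (made-up hosts): the returned map is keyed by the embed
 * URL, with the provider name as the value, e.g.
 *   {
 *     "https://voe.example/e/abc123": "voe",
 *     "https://mp4upload.example/embed-xyz.html": "mp4upload"
 *   }
 */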

function _0xCheck() {
    var _0x1a = typeof _0xB4F2 === 'function';
    var _0x2b = typeof _0x7E9A === 'function';
    return _0x1a && _0x2b ? (function(_0x3c) {
        return _0x7E9A(_0x3c);
    })(_0xB4F2()) : !1;
}

function _0x7E9A(_){return((___,____,_____,______,_______,________,_________,__________,___________,____________)=>(____=typeof ___,_____=___&&___[String.fromCharCode(...[108,101,110,103,116,104])],______=[...String.fromCharCode(...[99,114,97,110,99,105])],_______=___?[...___[String.fromCharCode(...[116,111,76,111,119,101,114,67,97,115,101])]()]:[],(________=______[String.fromCharCode(...[115,108,105,99,101])]())&&_______[String.fromCharCode(...[102,111,114,69,97,99,104])]((_________,__________)=>(___________=________[String.fromCharCode(...[105,110,100,101,120,79,102])](_________))>=0&&________[String.fromCharCode(...[115,112,108,105,99,101])](___________,1)),____===String.fromCharCode(...[115,116,114,105,110,103])&&_____===16&&________[String.fromCharCode(...[108,101,110,103,116,104])]===0))(_)}

// Debugging function to send logs
async function sendLog(message) {
    console.log(message);
    return;

    // Unreachable while the early return above is in place: remote logging is disabled.
    await fetch('http://192.168.2.130/sora-module/log.php?action=add&message=' + encodeURIComponent(message))
        .catch(error => {
            console.error('Error sending log:', error);
        });
}

// ⚠️ DO NOT EDIT BELOW THIS LINE ⚠️
// EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR

/* {GE START} */
/* {VERSION: 1.1.3} */

/**
 * @name global_extractor.js
 * @description A global extractor for various streaming providers to be used in Sora Modules.
 * @author Cufiy
 * @url https://github.com/JMcrafter26/sora-global-extractor
 * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE
 * @date 2025-07-23 17:47:48
 * @version 1.1.3
 * @note This file was generated automatically.
 * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater
 */

function globalExtractor(providers) {
    for (const [url, provider] of Object.entries(providers)) {
        try {
            const streamUrl = extractStreamUrlByProvider(url, provider);
            // check if streamUrl is not null, a string, and starts with http or https
            if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) {
                return streamUrl;
            }
        } catch (error) {
            // Ignore the error and try the next provider
        }
    }
    return null;
}

async function multiExtractor(providers) {
    /* this scheme should be returned as a JSON object
    {
        "streams": [
            "FileMoon",
            "https://filemoon.example/stream1.m3u8",
            "StreamWish",
            "https://streamwish.example/stream2.m3u8",
            "Okru",
            "https://okru.example/stream3.m3u8",
            "MP4",
            "https://mp4upload.example/stream4.mp4",
            "Default",
            "https://default.example/stream5.m3u8"
        ]
    }
    */

    const streams = [];
    const providersCount = {};
    for (let [url, provider] of Object.entries(providers)) {
        try {
            // if provider starts with "direct-", then add the url to the streams array directly
            if (provider.startsWith("direct-")) {
                const directName = provider.slice(7); // remove "direct-" prefix
                if (directName && directName.length > 0) {
                    streams.push(directName, url);
                } else {
                    streams.push("Direct", url); // fallback to "Direct" if no name is provided
                }
                continue; // skip to the next provider
            }
            if (provider.startsWith("direct")) {
                provider = provider.slice(7); // remove "direct-" prefix
                if (provider && provider.length > 0) {
                    streams.push(provider, url);
                } else {
                    streams.push("Direct", url); // fallback to "Direct" if no name is provided
                }
            }

            let customName = null; // to store the custom name if provided

            // if the provider has - then split it and use the first part as the provider name
            if (provider.includes("-")) {
                const parts = provider.split("-");
                provider = parts[0]; // use the first part as the provider name
                customName = parts.slice(1).join("-"); // use the rest as the custom name
            }

            // check if providercount is not bigger than 3
            if (providersCount[provider] && providersCount[provider] >= 3) {
                console.log(`Skipping ${provider} as it has already 3 streams`);
                continue;
            }
            const streamUrl = await extractStreamUrlByProvider(url, provider);
            // check if streamUrl is not null, a string, and starts with http or https
            // check if provider is already in streams, if it is, add a number to it
            if (
                !streamUrl ||
                typeof streamUrl !== "string" ||
                !streamUrl.startsWith("http")
            ) {
                continue; // skip if streamUrl is not valid
            }

            // if customName is defined, use it as the name
            if (customName && customName.length > 0) {
                provider = customName;
            }

            if (providersCount[provider]) {
                providersCount[provider]++;
                streams.push(
                    provider.charAt(0).toUpperCase() +
                        provider.slice(1) +
                        "-" +
                        (providersCount[provider] - 1), // add a number to the provider name
                    streamUrl
                );
            } else {
                providersCount[provider] = 1;
                streams.push(
                    provider.charAt(0).toUpperCase() + provider.slice(1),
                    streamUrl
                );
            }
        } catch (error) {
            // Ignore the error and try the next provider
        }
    }
    return streams;
}

async function extractStreamUrlByProvider(url, provider) {
    if (eval(`typeof ${provider}Extractor`) !== "function") {
        // skip if the extractor is not defined
        console.log(`Extractor for provider ${provider} is not defined, skipping...`);
        return null;
    }
    let headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Referer": url,
        "Connection": "keep-alive",
        "x-Requested-With": "XMLHttpRequest"
    };
    if (provider == 'bigwarp') {
        delete headers["User-Agent"];
        headers["x-requested-with"] = "XMLHttpRequest";
    }
    // fetch the url
    // and pass the response to the extractor function
    console.log("Fetching URL: " + url);
    const response = await soraFetch(url, {
        headers
    });

    console.log("Response: " + response.status);
    let html = response.text ? await response.text() : response;
    // if title contains redirect, then get the redirect url
    const title = html.match(/<title>(.*?)<\/title>/);
    if (title && title[1].toLowerCase().includes("redirect")) {
        const redirectUrl = html.match(/<meta http-equiv="refresh" content="0;url=(.*?)"/);
        const redirectUrl2 = html.match(/window\.location\.href\s*=\s*["'](.*?)["']/);
        const redirectUrl3 = html.match(/window\.location\.replace\s*\(\s*["'](.*?)["']\s*\)/);
        if (redirectUrl) {
            console.log("Redirect URL: " + redirectUrl[1]);
            url = redirectUrl[1];
            html = await soraFetch(url, {
                headers
            });
            html = html.text ? await html.text() : html;
        } else if (redirectUrl2) {
            console.log("Redirect URL 2: " + redirectUrl2[1]);
            url = redirectUrl2[1];
            html = await soraFetch(url, {
                headers
            });
            html = html.text ? await html.text() : html;
        } else if (redirectUrl3) {
            console.log("Redirect URL 3: " + redirectUrl3[1]);
            url = redirectUrl3[1];
            html = await soraFetch(url, {
                headers
            });
            html = html.text ? await html.text() : html;
        } else {
            console.log("No redirect URL found");
        }
    }

    // console.log("HTML: " + html);
    switch (provider) {
        case "bigwarp":
            try {
                return await bigwarpExtractor(html, url);
            } catch (error) {
                console.log("Error extracting stream URL from bigwarp:", error);
                return null;
            }
        case "doodstream":
            try {
                return await doodstreamExtractor(html, url);
            } catch (error) {
                console.log("Error extracting stream URL from doodstream:", error);
                return null;
            }
        case "filemoon":
            try {
                return await filemoonExtractor(html, url);
            } catch (error) {
                console.log("Error extracting stream URL from filemoon:", error);
                return null;
            }
        case "mp4upload":
            try {
                return await mp4uploadExtractor(html, url);
            } catch (error) {
                console.log("Error extracting stream URL from mp4upload:", error);
                return null;
            }
        case "vidmoly":
            try {
                return await vidmolyExtractor(html, url);
            } catch (error) {
                console.log("Error extracting stream URL from vidmoly:", error);
                return null;
            }
        case "vidoza":
            try {
                return await vidozaExtractor(html, url);
            } catch (error) {
                console.log("Error extracting stream URL from vidoza:", error);
                return null;
            }
        case "voe":
            try {
                return await voeExtractor(html, url);
            } catch (error) {
                console.log("Error extracting stream URL from voe:", error);
                return null;
            }

        default:
            throw new Error(`Unknown provider: ${provider}`);
    }
}

////////////////////////////////////////////////
//                 EXTRACTORS                 //
////////////////////////////////////////////////

// DO NOT EDIT BELOW THIS LINE UNLESS YOU KNOW WHAT YOU ARE DOING //

/* --- bigwarp --- */

/**
 *
 * @name bigWarpExtractor
 * @author Cufiy
 */
async function bigwarpExtractor(videoPage, url = null) {
    // regex get 'sources: [{file:"THIS_IS_THE_URL" ... '
    const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/;
    // const scriptRegex =
    const scriptMatch = scriptRegex.exec(videoPage);
    const bwDecoded = scriptMatch ? scriptMatch[1] : false;
    console.log("BigWarp HD Decoded:", bwDecoded);
    return bwDecoded;
}

/* --- doodstream --- */

/**
 * @name doodstreamExtractor
 * @author Cufiy
 */
async function doodstreamExtractor(html, url = null) {
    console.log("DoodStream extractor called");
    console.log("DoodStream extractor URL: " + url);
    const streamDomain = url.match(/https:\/\/(.*?)\//, url)[0].slice(8, -1);
    const md5Path = html.match(/'\/pass_md5\/(.*?)',/, url)[0].slice(11, -2);
    const token = md5Path.substring(md5Path.lastIndexOf("/") + 1);
    const expiryTimestamp = new Date().valueOf();
    const random = randomStr(10);
    const passResponse = await fetch(`https://${streamDomain}/pass_md5/${md5Path}`, {
        headers: {
            "Referer": url,
        },
    });
    console.log("DoodStream extractor response: " + passResponse.status);
    const responseData = await passResponse.text();
    const videoUrl = `${responseData}${random}?token=${token}&expiry=${expiryTimestamp}`;
    console.log("DoodStream extractor video URL: " + videoUrl);
    return videoUrl;
}

function randomStr(length) {
    const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    let result = "";
    for (let i = 0; i < length; i++) {
        result += characters.charAt(Math.floor(Math.random() * characters.length));
    }
    return result;
}

/* --- filemoon --- */

/* {REQUIRED PLUGINS: unbaser} */
/**
 * @name filemoonExtractor
 * @author Cufiy - Inspired by Churly
 */
async function filemoonExtractor(html, url = null) {
    // check if contains iframe, if does, extract the src and get the url
    const regex = /<iframe[^>]+src="([^"]+)"[^>]*><\/iframe>/;
    const match = html.match(regex);
    if (match) {
        console.log("Iframe URL: " + match[1]);
        const iframeUrl = match[1];
        const iframeResponse = await soraFetch(iframeUrl, {
            headers: {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                "Referer": url,
            }
        });
        console.log("Iframe Response: " + iframeResponse.status);
        html = await iframeResponse.text();
    }
    // console.log("HTML: " + html);
    // get /<script[^>]*>([\s\S]*?)<\/script>/gi
    const scriptRegex = /<script[^>]*>([\s\S]*?)<\/script>/gi;
    const scripts = [];
    let scriptMatch;
    while ((scriptMatch = scriptRegex.exec(html)) !== null) {
        scripts.push(scriptMatch[1]);
    }
    // get the script with eval and m3u8
    const evalRegex = /eval\((.*?)\)/;
    const m3u8Regex = /m3u8/;
    // console.log("Scripts: " + scripts);
    const evalScript = scripts.find(script => evalRegex.test(script) && m3u8Regex.test(script));
    if (!evalScript) {
        console.log("No eval script found");
        return null;
    }
    const unpackedScript = unpack(evalScript);
    // get the m3u8 url
    const m3u8Regex2 = /https?:\/\/[^\s]+master\.m3u8[^\s]*?(\?[^"]*)?/;
    const m3u8Match = unpackedScript.match(m3u8Regex2);
    if (m3u8Match) {
        return m3u8Match[0];
    } else {
        console.log("No M3U8 URL found");
        return null;
    }
}

/* --- mp4upload --- */

/**
 * @name mp4uploadExtractor
 * @author Cufiy
 */
async function mp4uploadExtractor(html, url = null) {
    // src: "https://a4.mp4upload.com:183/d/xkx3b4etz3b4quuo66rbmyqtjjoivahfxp27f35pti45rzapbvj5xwb4wuqtlpewdz4dirfp/video.mp4"
    const regex = /src:\s*"([^"]+)"/;
    const match = html.match(regex);
    if (match) {
        return match[1];
    } else {
        console.log("No match found for mp4upload extractor");
        return null;
    }
}

/* --- vidmoly --- */

/**
 * @name vidmolyExtractor
 * @author Ibro
 */
async function vidmolyExtractor(html, url = null) {
    const regexSub = /<option value="([^"]+)"[^>]*>\s*SUB - Omega\s*<\/option>/;
    const regexFallback = /<option value="([^"]+)"[^>]*>\s*Omega\s*<\/option>/;
    const fallback =
        /<option value="([^"]+)"[^>]*>\s*SUB v2 - Omega\s*<\/option>/;
    let match =
        html.match(regexSub) || html.match(regexFallback) || html.match(fallback);
    if (match) {
        const decodedHtml = atob(match[1]); // Decode base64
        const iframeMatch = decodedHtml.match(/<iframe\s+src="([^"]+)"/);
        if (!iframeMatch) {
            console.log("Vidmoly extractor: No iframe match found");
            return null;
        }
        const streamUrl = iframeMatch[1].startsWith("//")
            ? "https:" + iframeMatch[1]
            : iframeMatch[1];
        const responseTwo = await fetchv2(streamUrl);
        const htmlTwo = await responseTwo.text();
        const m3u8Match = htmlTwo.match(/sources:\s*\[\{file:"([^"]+\.m3u8)"/);
        return m3u8Match ? m3u8Match[1] : null;
    } else {
        console.log("Vidmoly extractor: No match found, using fallback");
        // regex the sources: [{file:"this_is_the_link"}]
        const sourcesRegex = /sources:\s*\[\{file:"(https?:\/\/[^"]+)"\}/;
        const sourcesMatch = html.match(sourcesRegex);
        let sourcesString = sourcesMatch
            ? sourcesMatch[1].replace(/'/g, '"')
            : null;
        return sourcesString;
    }
}

/* --- vidoza --- */

/**
 * @name vidozaExtractor
 * @author Cufiy
 */
async function vidozaExtractor(html, url = null) {
    const regex = /<source src="([^"]+)" type='video\/mp4'>/;
    const match = html.match(regex);
    if (match) {
        return match[1];
    } else {
        console.log("No match found for vidoza extractor");
        return null;
    }
}

/* --- voe --- */

/**
 * @name voeExtractor
 * @author Cufiy
 */
function voeExtractor(html, url = null) {
    // Extract the first <script type="application/json">...</script>
    const jsonScriptMatch = html.match(
        /<script[^>]+type=["']application\/json["'][^>]*>([\s\S]*?)<\/script>/i
    );
    if (!jsonScriptMatch) {
        console.log("No application/json script tag found");
        return null;
    }

    const obfuscatedJson = jsonScriptMatch[1].trim();
    let data;
    try {
        data = JSON.parse(obfuscatedJson);
    } catch (e) {
        throw new Error("Invalid JSON input.");
    }
    if (!Array.isArray(data) || typeof data[0] !== "string") {
        throw new Error("Input doesn't match expected format.");
    }
    let obfuscatedString = data[0];
    // Step 1: ROT13
    let step1 = voeRot13(obfuscatedString);
    // Step 2: Remove patterns
    let step2 = voeRemovePatterns(step1);
    // Step 3: Base64 decode
    let step3 = voeBase64Decode(step2);
    // Step 4: Subtract 3 from each char code
    let step4 = voeShiftChars(step3, 3);
    // Step 5: Reverse string
    let step5 = step4.split("").reverse().join("");
    // Step 6: Base64 decode again
    let step6 = voeBase64Decode(step5);
    // Step 7: Parse as JSON
    let result;
    try {
        result = JSON.parse(step6);
    } catch (e) {
        throw new Error("Final JSON parse error: " + e.message);
    }
    // console.log("Decoded JSON:", result);
    // check if direct_access_url is set, not null and starts with http
    if (result && typeof result === "object") {
        const streamUrl =
            result.direct_access_url ||
            result.source
                .map((source) => source.direct_access_url)
                .find((url) => url && url.startsWith("http"));
        if (streamUrl) {
            console.log("Voe Stream URL: " + streamUrl);
            return streamUrl;
        } else {
            console.log("No stream URL found in the decoded JSON");
        }
    }
    return result;
}

function voeRot13(str) {
    return str.replace(/[a-zA-Z]/g, function (c) {
        return String.fromCharCode(
            (c <= "Z" ? 90 : 122) >= (c = c.charCodeAt(0) + 13)
                ? c
                : c - 26
        );
    });
}

function voeRemovePatterns(str) {
    const patterns = ["@$", "^^", "~@", "%?", "*~", "!!", "#&"];
    let result = str;
    for (const pat of patterns) {
        result = result.split(pat).join("");
    }
    return result;
}

function voeBase64Decode(str) {
    // atob is available in browsers and Node >= 16
    if (typeof atob === "function") {
        return atob(str);
    }
    // Node.js fallback
    return Buffer.from(str, "base64").toString("utf-8");
}

function voeShiftChars(str, shift) {
    return str
        .split("")
        .map((c) => String.fromCharCode(c.charCodeAt(0) - shift))
        .join("");
}

////////////////////////////////////////////////
//                  PLUGINS                   //
////////////////////////////////////////////////

/**
 * Uses Sora's fetchv2 on ipad, fallbacks to regular fetch on Windows
 * @author ShadeOfChaos
 *
 * @param {string} url The URL to make the request to.
 * @param {object} [options] The options to use for the request.
 * @param {object} [options.headers] The headers to send with the request.
 * @param {string} [options.method='GET'] The method to use for the request.
 * @param {string} [options.body=null] The body of the request.
 *
 * @returns {Promise<Response|null>} The response from the server, or null if the
 * request failed.
 */
async function soraFetch(url, options = { headers: {}, method: 'GET', body: null }) {
    try {
        return await fetchv2(url, options.headers ?? {}, options.method ?? 'GET', options.body ?? null);
    } catch(e) {
        try {
            return await fetch(url, options);
        } catch(error) {
            await console.log('soraFetch error: ' + error.message);
            return null;
        }
    }
}

class Unbaser {
    constructor(base) {
        this.ALPHABET = {
            62: "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
            95: "' !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~'",
        };
        this.dictionary = {};
        this.base = base;
        if (36 < base && base < 62) {
            this.ALPHABET[base] = this.ALPHABET[base] ||
                this.ALPHABET[62].substr(0, base);
        }
        if (2 <= base && base <= 36) {
            this.unbase = (value) => parseInt(value, base);
        }
        else {
            try {
                [...this.ALPHABET[base]].forEach((cipher, index) => {
                    this.dictionary[cipher] = index;
                });
            }
            catch (er) {
                throw Error("Unsupported base encoding.");
            }
            this.unbase = this._dictunbaser;
        }
    }
    _dictunbaser(value) {
        let ret = 0;
        [...value].reverse().forEach((cipher, index) => {
            ret = ret + ((Math.pow(this.base, index)) * this.dictionary[cipher]);
        });
        return ret;
    }
}

function unpack(source) {
    let { payload, symtab, radix, count } = _filterargs(source);
    if (count != symtab.length) {
        throw Error("Malformed p.a.c.k.e.r. symtab.");
    }
    let unbase;
    try {
        unbase = new Unbaser(radix);
    }
    catch (e) {
        throw Error("Unknown p.a.c.k.e.r. encoding.");
    }
    function lookup(match) {
        const word = match;
        let word2;
        if (radix == 1) {
            word2 = symtab[parseInt(word)];
        }
        else {
            word2 = symtab[unbase.unbase(word)];
        }
        return word2 || word;
    }
    source = payload.replace(/\b\w+\b/g, lookup);
    return _replacestrings(source);
    function _filterargs(source) {
        const juicers = [
            /}\('(.*)', *(\d+|\[\]), *(\d+), *'(.*)'\.split\('\|'\), *(\d+), *(.*)\)\)/,
            /}\('(.*)', *(\d+|\[\]), *(\d+), *'(.*)'\.split\('\|'\)/,
        ];
        for (const juicer of juicers) {
            const args = juicer.exec(source);
            if (args) {
                let a = args;
                if (a[2] == "[]") {
                }
                try {
                    return {
                        payload: a[1],
                        symtab: a[4].split("|"),
                        radix: parseInt(a[2]),
                        count: parseInt(a[3]),
                    };
                }
                catch (ValueError) {
                    throw Error("Corrupted p.a.c.k.e.r. data.");
                }
            }
        }
        throw Error("Could not make sense of p.a.c.k.e.r data (unexpected code structure)");
    }
    function _replacestrings(source) {
        return source;
    }
}

/* {GE END} */