From 2ff8f723ff4d998ac34d711e3d66b13a2fbe86ff Mon Sep 17 00:00:00 2001 From: JMcrafter26 <77780772+JMcrafter26@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:59:35 +0100 Subject: [PATCH] Updated GlobalExtractor version to 1.1.8 --- .gitignore | 11 + animetoast/animetoast.json | 32 +- animetoast/animetoast_v2.js | 671 ++++++++++++++++++++++++++++++- aniworld/AniWorldEngSub.json | 34 +- aniworld/AniWorldGerDub.dev.json | 4 +- aniworld/AniWorldGerDub.json | 34 +- aniworld/AniWorldGerSub.json | 34 +- aniworld/v2/AniWorldEngSub_v2.js | 671 ++++++++++++++++++++++++++++++- aniworld/v2/AniWorldGerDub_v2.js | 671 ++++++++++++++++++++++++++++++- aniworld/v2/AniWorldGerSub_v2.js | 671 ++++++++++++++++++++++++++++++- dorabash/dorabash.js | 284 +++++++++++-- dorabash/dorabash.json | 36 +- fireanime/FireAnimeGer.json | 32 +- fireanime/FireAnimeGerDub.json | 32 +- fireanime/fireanime.json | 32 +- fireanime/v2/FireAnimeEngSub.js | 671 ++++++++++++++++++++++++++++++- fireanime/v2/FireAnimeGerDub.js | 671 ++++++++++++++++++++++++++++++- fireanime/v2/FireAnimeGerSub.js | 671 ++++++++++++++++++++++++++++++- s.to/sToEngDub.json | 34 +- s.to/sToEngDub_v2.js | 671 ++++++++++++++++++++++++++++++- s.to/sToGerDub.json | 34 +- s.to/sToGerDub_v2.js | 671 ++++++++++++++++++++++++++++++- 22 files changed, 6419 insertions(+), 253 deletions(-) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..403466f --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +*.dev.json +*.tmp.json +*.temp.json +*.test.json +*.log +*.bak +*.old +*.temp +*.tmp + +update_global_extractor.py \ No newline at end of file diff --git a/animetoast/animetoast.json b/animetoast/animetoast.json index 5139d2e..9a64075 100644 --- a/animetoast/animetoast.json +++ b/animetoast/animetoast.json @@ -1,17 +1,17 @@ { - "sourceName": "AnimeToast", - "iconUrl": "https://www.animetoast.cc/wp-content/uploads/2018/03/toastfavi-300x300.png", - "author": { - "name": "50/50 & Cufiy", - "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" - }, - "version": "1.2.12", - "language": "German (DUB/SUB)", - "streamType": "MP4", - "quality": "1080p", - "baseUrl": "https://www.animetoast.cc/", - "searchBaseUrl": "https://www.animetoast.cc/?s=the%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/animetoast/animetoast_v2.js", - "asyncJS": true, - "type": "anime" -} + "sourceName": "AnimeToast", + "iconUrl": "https://www.animetoast.cc/wp-content/uploads/2018/03/toastfavi-300x300.png", + "author": { + "name": "50/50 & Cufiy", + "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" + }, + "version": "1.2.13", + "language": "German (DUB/SUB)", + "streamType": "MP4", + "quality": "1080p", + "baseUrl": "https://www.animetoast.cc/", + "searchBaseUrl": "https://www.animetoast.cc/?s=the%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/animetoast/animetoast_v2.js", + "asyncJS": true, + "type": "anime" +} \ No newline at end of file diff --git a/animetoast/animetoast_v2.js b/animetoast/animetoast_v2.js index 994c27a..0cd2cc8 100644 --- a/animetoast/animetoast_v2.js +++ b/animetoast/animetoast_v2.js @@ -313,7 +313,7 @@ async function sendLog(message) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -321,8 +321,8 @@ async function sendLog(message) { * @author Cufiy * 
@url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -335,7 +335,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -398,8 +408,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -454,7 +470,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -513,6 +536,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -520,6 +550,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -527,6 +571,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting 
stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; + } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -610,6 +710,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -664,6 +786,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? 
xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -681,6 +1151,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -859,7 +1508,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -895,6 +1548,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/aniworld/AniWorldEngSub.json b/aniworld/AniWorldEngSub.json index 58dd0e6..97839b3 100644 --- a/aniworld/AniWorldEngSub.json +++ b/aniworld/AniWorldEngSub.json @@ -1,18 +1,18 @@ { - "sourceName": "AniWorld (ENG SUB)", - "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/aniworld.png", - "author": { - "name": "Hamzo & Cufiy", - "icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" - }, - "version": "0.2.7", - "language": "English (SUB)", - "streamType": "HLS", - "quality": "720p", - "baseUrl": "https://google.com", - "searchBaseUrl": "https://aniworld.to/ajax/seriesSearch?keyword=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/v2/AniWorldEngSub_v2.js", - "asyncJS": true, - "streamAsyncJS": false, - "type": "anime" -} + "sourceName": "AniWorld (ENG SUB)", + "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/aniworld.png", + "author": { + "name": "Hamzo & Cufiy", + "icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" + }, + "version": "0.2.8", + "language": "English (SUB)", + "streamType": "HLS", + "quality": "720p", + "baseUrl": "https://google.com", + "searchBaseUrl": "https://aniworld.to/ajax/seriesSearch?keyword=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/v2/AniWorldEngSub_v2.js", + "asyncJS": true, + "streamAsyncJS": false, + "type": "anime" +} \ No newline at end of file diff --git a/aniworld/AniWorldGerDub.dev.json b/aniworld/AniWorldGerDub.dev.json index f450919..015e1b6 100644 --- a/aniworld/AniWorldGerDub.dev.json +++ b/aniworld/AniWorldGerDub.dev.json @@ -1,5 +1,5 @@ { - "sourceName": "AniWorld (fixed)", + "sourceName": "AniWorld (Local Test)", "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/aniworld.png", "author": { "name": "Cufiy", @@ -11,7 +11,7 @@ "quality": "720p", "baseUrl": "https://vidmoly.to/", "searchBaseUrl": "https://aniworld.to/ajax/seriesSearch?keyword=%s", - "scriptUrl": "http://192.168.2.130/sora-sources2/aniworld/v2/AniWorldGerDub_v2.js", + "scriptUrl": "http://192.168.2.130/sora-module-repos/sources/aniworld/v2/AniWorldGerDub_v2.js", "asyncJS": true, "type": "anime" } \ No newline at end of file diff --git a/aniworld/AniWorldGerDub.json b/aniworld/AniWorldGerDub.json index 22b36d1..126867b 100644 --- a/aniworld/AniWorldGerDub.json +++ b/aniworld/AniWorldGerDub.json @@ -1,18 +1,18 @@ { - "sourceName": "AniWorld (GER DUB)", - "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/aniworld.png", - "author": { - "name": "Hamzo & Cufiy", - "icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" - }, - "version": "0.2.7", - "language": "German (DUB)", - "streamType": "HLS", - "quality": "720p", - "baseUrl": "https://google.com", - "searchBaseUrl": "https://aniworld.to/ajax/seriesSearch?keyword=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/v2/AniWorldGerDub_v2.js", - "asyncJS": true, - "streamAsyncJS": false, - "type": "anime" -} + "sourceName": "AniWorld (GER DUB)", + "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/aniworld.png", + "author": { + "name": "Hamzo & Cufiy", + 
"icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" + }, + "version": "0.2.8", + "language": "German (DUB)", + "streamType": "HLS", + "quality": "720p", + "baseUrl": "https://google.com", + "searchBaseUrl": "https://aniworld.to/ajax/seriesSearch?keyword=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/v2/AniWorldGerDub_v2.js", + "asyncJS": true, + "streamAsyncJS": false, + "type": "anime" +} \ No newline at end of file diff --git a/aniworld/AniWorldGerSub.json b/aniworld/AniWorldGerSub.json index 53767bc..7dd9a38 100644 --- a/aniworld/AniWorldGerSub.json +++ b/aniworld/AniWorldGerSub.json @@ -1,18 +1,18 @@ { - "sourceName": "AniWorld (GER SUB)", - "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/aniworld.png", - "author": { - "name": "Hamzo & Cufiy", - "icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" - }, - "version": "0.2.7", - "language": "German (SUB)", - "streamType": "HLS", - "quality": "720p", - "baseUrl": "https://google.com", - "searchBaseUrl": "https://aniworld.to/ajax/seriesSearch?keyword=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/v2/AniWorldGerSub_v2.js", - "asyncJS": true, - "streamAsyncJS": false, - "type": "anime" -} + "sourceName": "AniWorld (GER SUB)", + "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/aniworld.png", + "author": { + "name": "Hamzo & Cufiy", + "icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" + }, + "version": "0.2.8", + "language": "German (SUB)", + "streamType": "HLS", + "quality": "720p", + "baseUrl": "https://google.com", + "searchBaseUrl": "https://aniworld.to/ajax/seriesSearch?keyword=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/aniworld/v2/AniWorldGerSub_v2.js", + "asyncJS": true, + "streamAsyncJS": false, + "type": "anime" +} \ No newline at end of file diff --git a/aniworld/v2/AniWorldEngSub_v2.js b/aniworld/v2/AniWorldEngSub_v2.js index 598f075..3d1e322 100644 --- a/aniworld/v2/AniWorldEngSub_v2.js +++ b/aniworld/v2/AniWorldEngSub_v2.js @@ -350,7 +350,7 @@ async function sendLog(message) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -358,8 +358,8 @@ async function sendLog(message) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. 
https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -372,7 +372,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -435,8 +445,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -491,7 +507,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -550,6 +573,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -557,6 +587,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -564,6 +608,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; 
+ } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -647,6 +747,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -701,6 +823,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? 
xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -718,6 +1188,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -896,7 +1545,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -932,6 +1585,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
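+       (Added note: `replace(" ", "")` with a string pattern removes only the first space in the
+       string, so packed sources containing more than one space before or inside the
+       "eval(function(p,a,c,k,e," prefix will not be detected.)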
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/aniworld/v2/AniWorldGerDub_v2.js b/aniworld/v2/AniWorldGerDub_v2.js index 3ca4c1d..8c31427 100644 --- a/aniworld/v2/AniWorldGerDub_v2.js +++ b/aniworld/v2/AniWorldGerDub_v2.js @@ -351,7 +351,7 @@ async function sendLog(message) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -359,8 +359,8 @@ async function sendLog(message) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -373,7 +373,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -436,8 +446,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -492,7 +508,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -551,6 +574,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -558,6 +588,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try 
{ + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -565,6 +609,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; + } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -648,6 +748,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -702,6 +824,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? 
scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
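+ *
+ * Illustrative note (worked by hand, not part of the original description): the
+ * per-character update in step 2 is
+ *   hash = code + hash*31 + (hash << 7) - hash      // i.e. hash = code + 158*hash
+ * so for the two-character input "ab" (codes 97, 98) the running hash is
+ *   0 -> 97 -> 98 + 158*97 = 15424,
+ * kept exact as a BigInt until the modulo in step 3.
+ *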
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
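+ *
+ * Reading of the implementation below (added note): the seed is folded from the key
+ * phrase as seed = (seed*31 + charCode) & 0xffffffff, each draw advances a glibc-style
+ * LCG, seed = (seed*1103515245 + 12345) & 0x7fffffff, and the swaps form a reverse
+ * Fisher-Yates pass, so the same keyPhrase always reproduces the same permutation of
+ * the 95-character CHARSET.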
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -719,6 +1189,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -897,7 +1546,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -933,6 +1586,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/aniworld/v2/AniWorldGerSub_v2.js b/aniworld/v2/AniWorldGerSub_v2.js index 07b73ed..5508406 100644 --- a/aniworld/v2/AniWorldGerSub_v2.js +++ b/aniworld/v2/AniWorldGerSub_v2.js @@ -350,7 +350,7 @@ async function sendLog(message) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -358,8 +358,8 @@ async function sendLog(message) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -372,7 +372,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -435,8 +445,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -491,7 +507,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -550,6 +573,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -557,6 +587,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try 
{ + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -564,6 +608,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; + } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -647,6 +747,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -701,6 +823,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? 
scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -718,6 +1188,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -896,7 +1545,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -932,6 +1585,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/dorabash/dorabash.js b/dorabash/dorabash.js index d43e06d..8daaf19 100644 --- a/dorabash/dorabash.js +++ b/dorabash/dorabash.js @@ -147,7 +147,7 @@ function cleanHtmlSymbols(string) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.4} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -155,12 +155,13 @@ function cleanHtmlSymbols(string) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-08-13 03:44:07 - * @version 1.1.4 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ + function globalExtractor(providers) { for (const [url, provider] of Object.entries(providers)) { try { @@ -307,6 +308,8 @@ async function extractStreamUrlByProvider(url, provider) { headers["encoding"] = "windows-1251"; // required } else if (provider == 'sibnet') { headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } // fetch the url @@ -367,6 +370,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -374,6 +384,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } case "megacloud": try { return await megacloudExtractor(html, url); @@ -388,6 +405,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } case "sibnet": try { return await sibnetExtractor(html, url); @@ -395,6 +419,34 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from sibnet:", error); return null; } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; + } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch 
(error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } case "uqload": try { return await uqloadExtractor(html, url); @@ -402,6 +454,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from uqload:", error); return null; } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -430,17 +489,11 @@ async function extractStreamUrlByProvider(url, provider) { } - - - - //////////////////////////////////////////////// // EXTRACTORS // //////////////////////////////////////////////// // DO NOT EDIT BELOW THIS LINE UNLESS YOU KNOW WHAT YOU ARE DOING // - - /* --- bigwarp --- */ /** @@ -458,8 +511,6 @@ async function bigwarpExtractor(videoPage, url = null) { console.log("BigWarp HD Decoded:", bwDecoded); return bwDecoded; } - - /* --- doodstream --- */ /** @@ -493,7 +544,27 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} /* --- filemoon --- */ @@ -549,8 +620,18 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ - +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? scriptMatch[1] : false; + return decoded; +} /* --- megacloud --- */ /** @@ -560,16 +641,28 @@ async function filemoonExtractor(html, url = null) { // Megacloud V3 specific async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); const xraxParams = embedUrl.split('/').pop(); const xrax = xraxParams.includes('?') ? 
xraxParams.split('?')[0] : xraxParams; const nonce = await getNonce(embedUrl); // return decrypt(secretKey, nonce, encryptedText); try { - const response = await fetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`); + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); const rawSourceData = await response.json(); const encrypted = rawSourceData?.sources; let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); if (rawSourceData?.encrypted == false) { decryptedSources = rawSourceData.sources; } @@ -577,14 +670,14 @@ async function megacloudExtractor(html, embedUrl) { decryptedSources = await getDecryptedSourceV3(encrypted, nonce); if (!decryptedSources) throw new Error("Failed to decrypt source"); } - console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); // return the first source if it's an array if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { try { return decryptedSources[0].file; } catch (error) { console.log("Error extracting MegaCloud stream URL:" + error); - return null; + return false; } } // return { @@ -774,7 +867,7 @@ async function megacloudExtractor(html, embedUrl) { * @returns {string|null} The extracted nonce, or null if it couldn't be found */ async function getNonce(embedUrl) { - const res = await fetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); const html = await res.text(); const match0 = html.match(/\/); if (match0?.[1]) { @@ -857,10 +950,10 @@ async function megacloudExtractor(html, embedUrl) { } return keys; } - function fetchKey(name, url, timeout = 1000) { + function fetchKey(name, url) { return new Promise(async (resolve) => { try { - const response = await fetch(url, { method: 'get', timeout: timeout }); + const response = await soraFetch(url, { method: 'get' }); const key = await response.text(); let trueKey = null; try { @@ -875,8 +968,6 @@ async function megacloudExtractor(html, embedUrl) { }); } } - - /* --- mp4upload --- */ /** @@ -894,8 +985,17 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ - +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? 
match[1] : null; + return videoUrl; +} /* --- sibnet --- */ /** @@ -920,8 +1020,119 @@ async function sibnetExtractor(html, embedUrl) { return null; } } +/* --- streamtape --- */ +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? 
mp4Match[1] : null; +} /* --- uqload --- */ /** @@ -938,7 +1149,20 @@ async function uqloadExtractor(html, embedUrl) { return null; } } +/* --- videospk --- */ +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} /* --- vidmoly --- */ @@ -978,8 +1202,6 @@ async function vidmolyExtractor(html, url = null) { return sourcesString; } } - - /* --- vidoza --- */ /** @@ -996,8 +1218,6 @@ async function vidozaExtractor(html, url = null) { return null; } } - - /* --- voe --- */ /** @@ -1093,10 +1313,6 @@ function voeShiftChars(str, shift) { .join(""); } - - - - //////////////////////////////////////////////// // PLUGINS // //////////////////////////////////////////////// @@ -1126,7 +1342,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -1163,6 +1383,10 @@ class Unbaser { } } +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. */ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); diff --git a/dorabash/dorabash.json b/dorabash/dorabash.json index c74be5e..df5ec2e 100644 --- a/dorabash/dorabash.json +++ b/dorabash/dorabash.json @@ -1,19 +1,19 @@ { - "sourceName": "DoraBash", - "iconUrl": "https://dorabash.com/wp-content/uploads/2023/06/cropped-Untitled_design-removebg-192x192.png", - "author": { - "name": "50/50", - "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" - }, - "version": "1.0.1", - "language": "Hindi", - "streamType": "HLS", - "quality": "1080p", - "baseUrl": "https://dorabash.com/", - "searchBaseUrl": "https://dorabash.com/", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/dorabash/dorabash.js", - "type": "anime", - "asyncJS": true, - "softsub": false, - "downloadSupport": false -} + "sourceName": "DoraBash", + "iconUrl": "https://dorabash.com/wp-content/uploads/2023/06/cropped-Untitled_design-removebg-192x192.png", + "author": { + "name": "50/50", + "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" + }, + "version": "1.0.2", + "language": "Hindi", + "streamType": "HLS", + "quality": "1080p", + "baseUrl": "https://dorabash.com/", + "searchBaseUrl": "https://dorabash.com/", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/dorabash/dorabash.js", + "type": "anime", + "asyncJS": true, + "softsub": false, + "downloadSupport": false +} \ No newline at end of file diff --git a/fireanime/FireAnimeGer.json b/fireanime/FireAnimeGer.json index 2dccd53..8fc257f 100644 --- a/fireanime/FireAnimeGer.json +++ b/fireanime/FireAnimeGer.json @@ -1,17 +1,17 @@ { - "sourceName": "FireAnime SUB", - "iconUrl": "https://i.ibb.co/dJ1SN5ch/favicon.png", - "author": { 
- "name": "50/50 & Cufiy", - "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" - }, - "version": "1.1.2", - "language": "German (SUB)", - "streamType": "HLS", - "quality": "1080p", - "baseUrl": "https://fireani.me/", - "searchBaseUrl": "https://fireani.me/api/anime/search?q=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/fireanime/v2/FireAnimeGerSub.js", - "asyncJS": true, - "type": "anime" -} + "sourceName": "FireAnime SUB", + "iconUrl": "https://i.ibb.co/dJ1SN5ch/favicon.png", + "author": { + "name": "50/50 & Cufiy", + "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" + }, + "version": "1.1.3", + "language": "German (SUB)", + "streamType": "HLS", + "quality": "1080p", + "baseUrl": "https://fireani.me/", + "searchBaseUrl": "https://fireani.me/api/anime/search?q=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/fireanime/v2/FireAnimeGerSub.js", + "asyncJS": true, + "type": "anime" +} \ No newline at end of file diff --git a/fireanime/FireAnimeGerDub.json b/fireanime/FireAnimeGerDub.json index 2d81d4b..6d3501f 100644 --- a/fireanime/FireAnimeGerDub.json +++ b/fireanime/FireAnimeGerDub.json @@ -1,17 +1,17 @@ { - "sourceName": "FireAnime DUB", - "iconUrl": "https://i.ibb.co/dJ1SN5ch/favicon.png", - "author": { - "name": "50/50 & Cufiy", - "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" - }, - "version": "1.1.2", - "language": "German (DUB)", - "streamType": "HLS", - "quality": "1080p", - "baseUrl": "https://fireani.me/", - "searchBaseUrl": "https://fireani.me/api/anime/search?q=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/fireanime/v2/FireAnimeGerDub.js", - "asyncJS": true, - "type": "anime" -} + "sourceName": "FireAnime DUB", + "iconUrl": "https://i.ibb.co/dJ1SN5ch/favicon.png", + "author": { + "name": "50/50 & Cufiy", + "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" + }, + "version": "1.1.3", + "language": "German (DUB)", + "streamType": "HLS", + "quality": "1080p", + "baseUrl": "https://fireani.me/", + "searchBaseUrl": "https://fireani.me/api/anime/search?q=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/fireanime/v2/FireAnimeGerDub.js", + "asyncJS": true, + "type": "anime" +} \ No newline at end of file diff --git a/fireanime/fireanime.json b/fireanime/fireanime.json index 2a32d27..5629de7 100644 --- a/fireanime/fireanime.json +++ b/fireanime/fireanime.json @@ -1,17 +1,17 @@ { - "sourceName": "FireAnime English (SUB)", - "iconUrl": "https://i.ibb.co/dJ1SN5ch/favicon.png", - "author": { - "name": "50/50 & Cufiy", - "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" - }, - "version": "1.1.2", - "language": "English (SUB)", - "streamType": "HLS", - "quality": "1080p", - "baseUrl": "https://fireani.me/", - "searchBaseUrl": "https://fireani.me/api/anime/search?q=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/fireanime/v2/FireAnimeEngSub.js", - "asyncJS": true, - "type": "anime" -} + "sourceName": "FireAnime English (SUB)", + "iconUrl": "https://i.ibb.co/dJ1SN5ch/favicon.png", + "author": { + "name": "50/50 & Cufiy", + "icon": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ3122kQwublLkZ6rf1fEpUP79BxZOFmH9BSA&s" + }, + "version": "1.1.3", + "language": "English (SUB)", + "streamType": 
"HLS", + "quality": "1080p", + "baseUrl": "https://fireani.me/", + "searchBaseUrl": "https://fireani.me/api/anime/search?q=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/fireanime/v2/FireAnimeEngSub.js", + "asyncJS": true, + "type": "anime" +} \ No newline at end of file diff --git a/fireanime/v2/FireAnimeEngSub.js b/fireanime/v2/FireAnimeEngSub.js index 53955b2..a581be0 100644 --- a/fireanime/v2/FireAnimeEngSub.js +++ b/fireanime/v2/FireAnimeEngSub.js @@ -190,7 +190,7 @@ async function sendLog(message) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -198,8 +198,8 @@ async function sendLog(message) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -212,7 +212,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -275,8 +285,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -331,7 +347,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -390,6 +413,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -397,6 +427,20 @@ async function extractStreamUrlByProvider(url, provider) { 
console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -404,6 +448,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; + } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -487,6 +587,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -541,6 +663,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? 
scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
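+ *
+ * Note: as implemented below, the computed key is always printable ASCII (codes 32-126)
+ * and between 96 and 128 characters long, since length = 96 + (hash % 33).
+ * Typical call site (see decrypt below): const keyPhrase = computeKey(secretKey, nonce);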
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
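+ * Note: the permutation is driven by a small linear congruential generator
+ * (seed = (seed * 1103515245 + 12345) masked to 31 bits) seeded from keyPhrase, so the
+ * same key phrase always yields the same ordering and decrypt() can rebuild the same
+ * substitution table for each round.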
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -558,6 +1028,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -736,7 +1385,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -772,6 +1425,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
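+ Note: with a string pattern, replace(" ", "") strips only the first space, so spaced packer headers such as "eval(function (p, a, c, k, e," will not match; a stricter variant could use source.replace(/\s+/g, "").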
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/fireanime/v2/FireAnimeGerDub.js b/fireanime/v2/FireAnimeGerDub.js index 3914bd1..fef42ce 100644 --- a/fireanime/v2/FireAnimeGerDub.js +++ b/fireanime/v2/FireAnimeGerDub.js @@ -191,7 +191,7 @@ async function sendLog(message) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -199,8 +199,8 @@ async function sendLog(message) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -213,7 +213,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -276,8 +286,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -332,7 +348,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -391,6 +414,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -398,6 +428,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + 
return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -405,6 +449,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; + } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -488,6 +588,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -542,6 +664,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? 
scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
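+ *
+ * Note: as implemented below, the computed key is always printable ASCII (codes 32-126)
+ * and between 96 and 128 characters long, since length = 96 + (hash % 33).
+ * Typical call site (see decrypt below): const keyPhrase = computeKey(secretKey, nonce);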
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
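+ * Note: the permutation is driven by a small linear congruential generator
+ * (seed = (seed * 1103515245 + 12345) masked to 31 bits) seeded from keyPhrase, so the
+ * same key phrase always yields the same ordering and decrypt() can rebuild the same
+ * substitution table for each round.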
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -559,6 +1029,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -737,7 +1386,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -773,6 +1426,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
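+       Typical guard before calling unpack(), sketched with the same capture the
+       packed-script extractors above use:
+         if (detectUnbaser(obfuscatedScript[1])) { const unpacked = unpack(obfuscatedScript[1]); }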
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/fireanime/v2/FireAnimeGerSub.js b/fireanime/v2/FireAnimeGerSub.js index ae37264..eb9a3ed 100644 --- a/fireanime/v2/FireAnimeGerSub.js +++ b/fireanime/v2/FireAnimeGerSub.js @@ -190,7 +190,7 @@ async function sendLog(message) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -198,8 +198,8 @@ async function sendLog(message) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -212,7 +212,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -275,8 +285,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -331,7 +347,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -390,6 +413,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -397,6 +427,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + 
return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -404,6 +448,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; + } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -487,6 +587,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -541,6 +663,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? 
scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
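+ * The resulting key is printable ASCII (codes 32-126) and 96 to 128 characters long;
+ * decrypt() appends the round number to it to derive each round's passphrase.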
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
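+ * Note: the generator is a small 31-bit LCG (multiplier 1103515245, increment 12345),
+ * so the same key phrase always yields the same permutation; decrypt() inverts it
+ * with a simple character lookup map (shuffled charset back to CHARSET).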
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -558,6 +1028,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -736,7 +1385,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -772,6 +1425,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
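+       Typical guard before calling unpack(), sketched with the same capture the
+       packed-script extractors above use:
+         if (detectUnbaser(obfuscatedScript[1])) { const unpacked = unpack(obfuscatedScript[1]); }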
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/s.to/sToEngDub.json b/s.to/sToEngDub.json index 6a6b81a..c1f6543 100644 --- a/s.to/sToEngDub.json +++ b/s.to/sToEngDub.json @@ -1,18 +1,18 @@ { - "sourceName": "s.to (ENG DUB)", - "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sto.png", - "author": { - "name": "Cufiy", - "icon": "https://files.catbox.moe/ttj4fc.gif" - }, - "version": "0.3.14", - "language": "English (DUB)", - "streamType": "HLS", - "quality": "720p", - "baseUrl": "https://google.com", - "searchBaseUrl": "https://s.to/ajax/seriesSearch?keyword=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sToEngDub_v2.js", - "asyncJS": true, - "streamAsyncJS": false, - "type": "shows" -} + "sourceName": "s.to (ENG DUB)", + "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sto.png", + "author": { + "name": "Cufiy", + "icon": "https://files.catbox.moe/ttj4fc.gif" + }, + "version": "0.3.15", + "language": "English (DUB)", + "streamType": "HLS", + "quality": "720p", + "baseUrl": "https://google.com", + "searchBaseUrl": "https://s.to/ajax/seriesSearch?keyword=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sToEngDub_v2.js", + "asyncJS": true, + "streamAsyncJS": false, + "type": "shows" +} \ No newline at end of file diff --git a/s.to/sToEngDub_v2.js b/s.to/sToEngDub_v2.js index a8c7039..af0169a 100644 --- a/s.to/sToEngDub_v2.js +++ b/s.to/sToEngDub_v2.js @@ -352,7 +352,7 @@ function base64Decode(str) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -360,8 +360,8 @@ function base64Decode(str) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. 
https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -374,7 +374,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -437,8 +447,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -493,7 +509,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -552,6 +575,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -559,6 +589,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -566,6 +610,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; 
+ } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -649,6 +749,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -703,6 +825,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? 
xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
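+ * The resulting key is printable ASCII (codes 32-126) and 96 to 128 characters long;
+ * decrypt() appends the round number to it to derive each round's passphrase.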
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
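+ * Note: the generator is a small 31-bit LCG (multiplier 1103515245, increment 12345),
+ * so the same key phrase always yields the same permutation; decrypt() inverts it
+ * with a simple character lookup map (shuffled charset back to CHARSET).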
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -720,6 +1190,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -898,7 +1547,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -934,6 +1587,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. 
*/ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) { diff --git a/s.to/sToGerDub.json b/s.to/sToGerDub.json index df2cd18..8f99620 100644 --- a/s.to/sToGerDub.json +++ b/s.to/sToGerDub.json @@ -1,18 +1,18 @@ { - "sourceName": "s.to (GER DUB)", - "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sto.png", - "author": { - "name": "Hamzo & Cufiy", - "icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" - }, - "version": "0.3.14", - "language": "German (DUB)", - "streamType": "HLS", - "quality": "720p", - "baseUrl": "https://google.com", - "searchBaseUrl": "https://s.to/ajax/seriesSearch?keyword=%s", - "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sToGerDub_v2.js", - "asyncJS": true, - "streamAsyncJS": false, - "type": "shows" -} + "sourceName": "s.to (GER DUB)", + "iconUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sto.png", + "author": { + "name": "Hamzo & Cufiy", + "icon": "https://cdn.discordapp.com/avatars/623644371819954226/591ecab10b0b4535e859bb0b9bbe62e5?size=1024" + }, + "version": "0.3.15", + "language": "German (DUB)", + "streamType": "HLS", + "quality": "720p", + "baseUrl": "https://google.com", + "searchBaseUrl": "https://s.to/ajax/seriesSearch?keyword=%s", + "scriptUrl": "https://git.luna-app.eu/50n50/sources/raw/branch/main/s.to/sToGerDub_v2.js", + "asyncJS": true, + "streamAsyncJS": false, + "type": "shows" +} \ No newline at end of file diff --git a/s.to/sToGerDub_v2.js b/s.to/sToGerDub_v2.js index de87028..6b77b61 100644 --- a/s.to/sToGerDub_v2.js +++ b/s.to/sToGerDub_v2.js @@ -352,7 +352,7 @@ function base64Decode(str) { // EDITING THIS FILE COULD BREAK THE UPDATER AND CAUSE ISSUES WITH THE EXTRACTOR /* {GE START} */ -/* {VERSION: 1.1.3} */ +/* {VERSION: 1.1.8} */ /** * @name global_extractor.js @@ -360,8 +360,8 @@ function base64Decode(str) { * @author Cufiy * @url https://github.com/JMcrafter26/sora-global-extractor * @license CUSTOM LICENSE - see https://github.com/JMcrafter26/sora-global-extractor/blob/main/LICENSE - * @date 2025-07-23 17:47:48 - * @version 1.1.3 + * @date 2025-11-05 15:44:57 + * @version 1.1.8 * @note This file was generated automatically. * The global extractor comes with an auto-updating feature, so you can always get the latest version. 
https://github.com/JMcrafter26/sora-global-extractor#-auto-updater */ @@ -374,7 +374,17 @@ function globalExtractor(providers) { // check if streamUrl is not null, a string, and starts with http or https if (streamUrl && typeof streamUrl === "string" && (streamUrl.startsWith("http"))) { return streamUrl; + // if its an array, get the value that starts with http + } else if (Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + return httpStream; + } + } else if (streamUrl || typeof streamUrl !== "string") { + // check if it's a valid stream URL + return null; } + } catch (error) { // Ignore the error and try the next provider } @@ -437,8 +447,14 @@ async function multiExtractor(providers) { console.log(`Skipping ${provider} as it has already 3 streams`); continue; } - const streamUrl = await extractStreamUrlByProvider(url, provider); - // check if streamUrl is not null, a string, and starts with http or https + let streamUrl = await extractStreamUrlByProvider(url, provider); + + if (streamUrl && Array.isArray(streamUrl)) { + const httpStream = streamUrl.find(url => url.startsWith("http")); + if (httpStream) { + streamUrl = httpStream; + } + } // check if provider is already in streams, if it is, add a number to it if ( !streamUrl || @@ -493,7 +509,14 @@ async function extractStreamUrlByProvider(url, provider) { if(provider == 'bigwarp') { delete headers["User-Agent"]; headers["x-requested-with"] = "XMLHttpRequest"; + } else if (provider == 'vk') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'sibnet') { + headers["encoding"] = "windows-1251"; // required + } else if (provider == 'supervideo') { + delete headers["User-Agent"]; } + // fetch the url // and pass the response to the extractor function console.log("Fetching URL: " + url); @@ -552,6 +575,13 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from doodstream:", error); return null; } + case "earnvids": + try { + return await earnvidsExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from earnvids:", error); + return null; + } case "filemoon": try { return await filemoonExtractor(html, url); @@ -559,6 +589,20 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from filemoon:", error); return null; } + case "lulustream": + try { + return await lulustreamExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from lulustream:", error); + return null; + } + case "megacloud": + try { + return await megacloudExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from megacloud:", error); + return null; + } case "mp4upload": try { return await mp4uploadExtractor(html, url); @@ -566,6 +610,62 @@ async function extractStreamUrlByProvider(url, provider) { console.log("Error extracting stream URL from mp4upload:", error); return null; } + case "sendvid": + try { + return await sendvidExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sendvid:", error); + return null; + } + case "sibnet": + try { + return await sibnetExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from sibnet:", error); + return null; + } + case "streamtape": + try { + return await streamtapeExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamtape:", error); + return null; 
+ } + case "streamup": + try { + return await streamupExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from streamup:", error); + return null; + } + case "supervideo": + try { + return await supervideoExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from supervideo:", error); + return null; + } + case "uploadcx": + try { + return await uploadcxExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uploadcx:", error); + return null; + } + case "uqload": + try { + return await uqloadExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from uqload:", error); + return null; + } + case "videospk": + try { + return await videospkExtractor(html, url); + } catch (error) { + console.log("Error extracting stream URL from videospk:", error); + return null; + } case "vidmoly": try { return await vidmolyExtractor(html, url); @@ -649,6 +749,28 @@ function randomStr(length) { } return result; } +/* --- earnvids --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name earnvidsExtractor + * @author 50/50 + */ +async function earnvidsExtractor(html, url = null) { + try { + const obfuscatedScript = html.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + const baseUrl = url.match(/^(https?:\/\/[^/]+)/)[1]; + console.log("HLS Link:" + baseUrl + hlsLink); + return baseUrl + hlsLink; + } catch (err) { + console.log(err); + return "https://files.catbox.moe/avolvc.mp4"; + } +} + /* --- filemoon --- */ /* {REQUIRED PLUGINS: unbaser} */ @@ -703,6 +825,354 @@ async function filemoonExtractor(html, url = null) { } +/* --- lulustream --- */ + +/** + * @name LuluStream Extractor + * @author Cufiy + */ +async function lulustreamExtractor(data, url = null) { + const scriptRegex = /sources:\s*\[\{file:"([^"]+)"/; + const scriptMatch = scriptRegex.exec(data); + const decoded = scriptMatch ? scriptMatch[1] : false; + return decoded; +} +/* --- megacloud --- */ + +/** + * @name megacloudExtractor + * @author ShadeOfChaos + */ + +// Megacloud V3 specific +async function megacloudExtractor(html, embedUrl) { + // TESTING ONLY START + const testcase = '/api/static'; + if(embedUrl.slice(-testcase.length) == testcase) { + try { + const response = await soraFetch(embedUrl, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + embedUrl = response.url; + } catch (error) { + throw new Error("[TESTING ONLY] Megacloud extraction error:", error); + } + } + // TESTING ONLY END + const CHARSET = Array.from({ length: 95 }, (_, i) => String.fromCharCode(i + 32)); + const xraxParams = embedUrl.split('/').pop(); + const xrax = xraxParams.includes('?') ? 
xraxParams.split('?')[0] : xraxParams; + const nonce = await getNonce(embedUrl); + // return decrypt(secretKey, nonce, encryptedText); + try { + const response = await soraFetch(`https://megacloud.blog/embed-2/v3/e-1/getSources?id=${xrax}&_k=${nonce}`, { method: 'GET', headers: { "referer": "https://megacloud.blog/" } }); + const rawSourceData = await response.json(); + const encrypted = rawSourceData?.sources; + let decryptedSources = null; + // console.log('rawSourceData', rawSourceData); + if (rawSourceData?.encrypted == false) { + decryptedSources = rawSourceData.sources; + } + if (decryptedSources == null) { + decryptedSources = await getDecryptedSourceV3(encrypted, nonce); + if (!decryptedSources) throw new Error("Failed to decrypt source"); + } + // console.log("Decrypted sources:" + JSON.stringify(decryptedSources, null, 2)); + // return the first source if it's an array + if (Array.isArray(decryptedSources) && decryptedSources.length > 0) { + try { + return decryptedSources[0].file; + } catch (error) { + console.log("Error extracting MegaCloud stream URL:" + error); + return false; + } + } + // return { + // status: true, + // result: { + // sources: decryptedSources, + // tracks: rawSourceData.tracks, + // intro: rawSourceData.intro ?? null, + // outro: rawSourceData.outro ?? null, + // server: rawSourceData.server ?? null + // } + // } + } catch (error) { + console.error(`[ERROR][decryptSources] Error decrypting ${embedUrl}:`, error); + return { + status: false, + error: error?.message || 'Failed to get HLS link' + }; + } + /** + * Computes a key based on the given secret and nonce. + * The key is used to "unlock" the encrypted data. + * The computation of the key is based on the following steps: + * 1. Concatenate the secret and nonce. + * 2. Compute a hash value of the concatenated string using a simple + * hash function (similar to Java's String.hashCode()). + * 3. Compute the remainder of the hash value divided by the maximum + * value of a 64-bit signed integer. + * 4. Use the result as a XOR mask to process the characters of the + * concatenated string. + * 5. Rotate the XOR-processed string by a shift amount equal to the + * hash value modulo the length of the XOR-processed string plus 5. + * 6. Interleave the rotated string with the reversed nonce string. + * 7. Take a substring of the interleaved string of length equal to 96 + * plus the hash value modulo 33. + * 8. Convert each character of the substring to a character code + * between 32 and 126 (inclusive) by taking the remainder of the + * character code divided by 95 and adding 32. + * 9. Join the resulting array of characters into a string and return it. 
+ * @param {string} secret - The secret string + * @param {string} nonce - The nonce string + * @returns {string} The computed key + */ + function computeKey(secret, nonce) { + const secretAndNonce = secret + nonce; + let hashValue = 0n; + for (const char of secretAndNonce) { + hashValue = BigInt(char.charCodeAt(0)) + hashValue * 31n + (hashValue << 7n) - hashValue; + } + const maximum64BitSignedIntegerValue = 0x7fffffffffffffffn; + const hashValueModuloMax = hashValue % maximum64BitSignedIntegerValue; + const xorMask = 247; + const xorProcessedString = [...secretAndNonce] + .map(char => String.fromCharCode(char.charCodeAt(0) ^ xorMask)) + .join(''); + const xorLen = xorProcessedString.length; + const shiftAmount = (Number(hashValueModuloMax) % xorLen) + 5; + const rotatedString = xorProcessedString.slice(shiftAmount) + xorProcessedString.slice(0, shiftAmount); + const reversedNonceString = nonce.split('').reverse().join(''); + let interleavedString = ''; + const maxLen = Math.max(rotatedString.length, reversedNonceString.length); + for (let i = 0; i < maxLen; i++) { + interleavedString += (rotatedString[i] || '') + (reversedNonceString[i] || ''); + } + const length = 96 + (Number(hashValueModuloMax) % 33); + const partialString = interleavedString.substring(0, length); + return [...partialString] + .map(ch => String.fromCharCode((ch.charCodeAt(0) % 95) + 32)) + .join(''); + } + /** + * Encrypts a given text using a columnar transposition cipher with a given key. + * The function arranges the text into a grid of columns and rows determined by the key length, + * fills the grid column by column based on the sorted order of the key characters, + * and returns the encrypted text by reading the grid row by row. + * + * @param {string} text - The text to be encrypted. + * @param {string} key - The key that determines the order of columns in the grid. + * @returns {string} The encrypted text. + */ + function columnarCipher(text, key) { + const columns = key.length; + const rows = Math.ceil(text.length / columns); + const grid = Array.from({ length: rows }, () => Array(columns).fill('')); + const columnOrder = [...key] + .map((char, idx) => ({ char, idx })) + .sort((a, b) => a.char.charCodeAt(0) - b.char.charCodeAt(0)); + let i = 0; + for (const { idx } of columnOrder) { + for (let row = 0; row < rows; row++) { + grid[row][idx] = text[i++] || ''; + } + } + return grid.flat().join(''); + } + /** + * Deterministically unshuffles an array of characters based on a given key phrase. + * The function simulates a pseudo-random shuffling using a numeric seed derived + * from the key phrase. This ensures that the same character array and key phrase + * will always produce the same output, allowing for deterministic "unshuffling". + * @param {Array} characters - The array of characters to unshuffle. + * @param {string} keyPhrase - The key phrase used to generate the seed for the + * pseudo-random number generator. + * @returns {Array} A new array representing the deterministically unshuffled characters. 
+ */ + function deterministicUnshuffle(characters, keyPhrase) { + let seed = [...keyPhrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + const shuffledCharacters = characters.slice(); + for (let i = shuffledCharacters.length - 1; i > 0; i--) { + const j = randomNumberGenerator(i + 1); + [shuffledCharacters[i], shuffledCharacters[j]] = [shuffledCharacters[j], shuffledCharacters[i]]; + } + return shuffledCharacters; + } + /** + * Decrypts an encrypted text using a secret key and a nonce through multiple rounds of decryption. + * The decryption process includes base64 decoding, character substitution using a pseudo-random + * number generator, a columnar transposition cipher, and deterministic unshuffling of the character set. + * Finally, it extracts and parses the decrypted JSON string or verifies it using a regex pattern. + * + * @param {string} secretKey - The key used to decrypt the text. + * @param {string} nonce - A nonce for additional input to the decryption key. + * @param {string} encryptedText - The text to be decrypted, encoded in base64. + * @param {number} [rounds=3] - The number of decryption rounds to perform. + * @returns {Object|null} The decrypted JSON object if successful, or null if parsing fails. + */ + function decrypt(secretKey, nonce, encryptedText, rounds = 3) { + let decryptedText = Buffer.from(encryptedText, 'base64').toString('utf-8'); + const keyPhrase = computeKey(secretKey, nonce); + for (let round = rounds; round >= 1; round--) { + const encryptionPassphrase = keyPhrase + round; + let seed = [...encryptionPassphrase].reduce((acc, char) => (acc * 31n + BigInt(char.charCodeAt(0))) & 0xffffffffn, 0n); + const randomNumberGenerator = (upperLimit) => { + seed = (seed * 1103515245n + 12345n) & 0x7fffffffn; + return Number(seed % BigInt(upperLimit)); + }; + decryptedText = [...decryptedText] + .map(char => { + const charIndex = CHARSET.indexOf(char); + if (charIndex === -1) return char; + const offset = randomNumberGenerator(95); + return CHARSET[(charIndex - offset + 95) % 95]; + }) + .join(''); + decryptedText = columnarCipher(decryptedText, encryptionPassphrase); + const shuffledCharset = deterministicUnshuffle(CHARSET, encryptionPassphrase); + const mappingArr = {}; + shuffledCharset.forEach((c, i) => (mappingArr[c] = CHARSET[i])); + decryptedText = [...decryptedText].map(char => mappingArr[char] || char).join(''); + } + const lengthString = decryptedText.slice(0, 4); + let length = parseInt(lengthString, 10); + if (isNaN(length) || length <= 0 || length > decryptedText.length - 4) { + console.error('Invalid length in decrypted string'); + return decryptedText; + } + const decryptedString = decryptedText.slice(4, 4 + length); + try { + return JSON.parse(decryptedString); + } catch (e) { + console.warn('Could not parse decrypted string, unlikely to be valid. Using regex to verify'); + const regex = /"file":"(.*?)".*?"type":"(.*?)"/; + const match = encryptedText.match(regex); + const matchedFile = match?.[1]; + const matchType = match?.[2]; + if (!matchedFile || !matchType) { + console.error('Could not match file or type in decrypted string'); + return null; + } + return decryptedString; + } + } + /** + * Tries to extract the MegaCloud nonce from the given embed URL. + * + * Fetches the HTML of the page, and tries to extract the nonce from it. 
+ * If that fails, it sends a request with the "x-requested-with" header set to "XMLHttpRequest" + * and tries to extract the nonce from that HTML. + * + * If all else fails, it logs the HTML of both requests and returns null. + * + * @param {string} embedUrl The URL of the MegaCloud embed + * @returns {string|null} The extracted nonce, or null if it couldn't be found + */ + async function getNonce(embedUrl) { + const res = await soraFetch(embedUrl, { headers: { "referer": "https://anicrush.to/", "x-requested-with": "XMLHttpRequest" } }); + const html = await res.text(); + const match0 = html.match(/\/); + if (match0?.[1]) { + return match0[1]; + } + const match1 = html.match(/_is_th:(\S*?)\s/); + if (match1?.[1]) { + return match1[1]; + } + const match2 = html.match(/data-dpi="([\s\S]*?)"/); + if (match2?.[1]) { + return match2[1]; + } + const match3 = html.match(/_lk_db[\s]?=[\s\S]*?x:[\s]"([\S]*?)"[\s\S]*?y:[\s]"([\S]*?)"[\s\S]*?z:[\s]"([\S]*?)"/); + if (match3?.[1] && match3?.[2] && match3?.[3]) { + return "" + match3[1] + match3[2] + match3[3]; + } + const match4 = html.match(/nonce="([\s\S]*?)"/); + if (match4?.[1]) { + if (match4[1].length >= 32) return match4[1]; + } + const match5 = html.match(/_xy_ws = "(\S*?)"/); + if (match5?.[1]) { + return match5[1]; + } + const match6 = html.match(/[a-zA-Z0-9]{48}]/); + if (match6?.[1]) { + return match6[1]; + } + return null; + } + async function getDecryptedSourceV3(encrypted, nonce) { + let decrypted = null; + const keys = await asyncGetKeys(); + for(let key in keys) { + try { + if (!encrypted) { + console.log("Encrypted source missing in response") + return null; + } + decrypted = decrypt(keys[key], nonce, encrypted); + if(!Array.isArray(decrypted) || decrypted.length <= 0) { + // Failed to decrypt source + continue; + } + for(let source of decrypted) { + if(source != null && source?.file?.startsWith('https://')) { + // Malformed decrypted source + continue; + } + } + console.log("Functioning key:", key); + return decrypted; + } catch(error) { + console.error('Error:', error); + console.error(`[${ new Date().toLocaleString() }] Key did not work: ${ key }`); + continue; + } + } + return null; + } + async function asyncGetKeys() { + const resolution = await Promise.allSettled([ + fetchKey("ofchaos", "https://ac-api.ofchaos.com/api/key"), + fetchKey("yogesh", "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"), + fetchKey("esteven", "https://raw.githubusercontent.com/carlosesteven/e1-player-deobf/refs/heads/main/output/key.json") + ]); + const keys = resolution.filter(r => r.status === 'fulfilled' && r.value != null).reduce((obj, r) => { + let rKey = Object.keys(r.value)[0]; + let rValue = Object.values(r.value)[0]; + if (typeof rValue === 'string') { + obj[rKey] = rValue.trim(); + return obj; + } + obj[rKey] = rValue?.mega ?? rValue?.decryptKey ?? rValue?.MegaCloud?.Anime?.Key ?? rValue?.megacloud?.key ?? rValue?.key ?? rValue?.megacloud?.anime?.key ?? 
rValue?.megacloud; + return obj; + }, {}); + if (keys.length === 0) { + throw new Error("Failed to fetch any decryption key"); + } + return keys; + } + function fetchKey(name, url) { + return new Promise(async (resolve) => { + try { + const response = await soraFetch(url, { method: 'get' }); + const key = await response.text(); + let trueKey = null; + try { + trueKey = JSON.parse(key); + } catch (e) { + trueKey = key; + } + resolve({ [name]: trueKey }) + } catch (error) { + resolve(null); + } + }); + } +} /* --- mp4upload --- */ /** @@ -720,6 +1190,185 @@ async function mp4uploadExtractor(html, url = null) { return null; } } +/* --- sendvid --- */ + +/** + * @name sendvidExtractor + * @author 50/50 + */ +async function sendvidExtractor(data, url = null) { + const match = data.match(/var\s+video_source\s*=\s*"([^"]+)"/); + const videoUrl = match ? match[1] : null; + return videoUrl; +} +/* --- sibnet --- */ + +/** + * @name sibnetExtractor + * @author scigward + */ +async function sibnetExtractor(html, embedUrl) { + try { + const videoMatch = html.match( + /player\.src\s*\(\s*\[\s*\{\s*src\s*:\s*["']([^"']+)["']/i + ); + if (!videoMatch || !videoMatch[1]) { + throw new Error("Sibnet video source not found"); + } + const videoPath = videoMatch[1]; + const videoUrl = videoPath.startsWith("http") + ? videoPath + : `https://video.sibnet.ru${videoPath}`; + return videoUrl; + } catch (error) { + console.log("SibNet extractor error: " + error.message); + return null; + } +} +/* --- streamtape --- */ + +/** + * + * @name streamTapeExtractor + * @author ShadeOfChaos + */ +async function streamtapeExtractor(html, url) { + let promises = []; + const LINK_REGEX = /link['"]{1}\).innerHTML *= *['"]{1}([\s\S]*?)["'][\s\S]*?\(["']([\s\S]*?)["']([\s\S]*?);/g; + const CHANGES_REGEX = /([0-9]+)/g; + if(html == null) { + if(url == null) { + throw new Error('Provided incorrect parameters.'); + } + const response = await soraFetch(url); + html = await response.text(); + } + const matches = html.matchAll(LINK_REGEX); + for (const match of matches) { + let base = match?.[1]; + let params = match?.[2]; + const changeStr = match?.[3]; + if(changeStr == null || changeStr == '') continue; + const changes = changeStr.match(CHANGES_REGEX); + for(let n of changes) { + params = params.substring(n); + } + while(base[0] == '/') { + base = base.substring(1); + } + const url = 'https://' + base + params; + promises.push(testUrl(url)); + } + // Race for first success + return Promise.any(promises).then((value) => { + return value; + }).catch((error) => { + return null; + }); + async function testUrl(url) { + return new Promise(async (resolve, reject) => { + try { + // Timeout version prefered, but Sora does not support it currently + // var response = await soraFetch(url, { method: 'GET', signal: AbortSignal.timeout(2000) }); + var response = await soraFetch(url); + if(response == null) throw new Error('Connection timed out.'); + } catch(e) { + console.error('Rejected due to:', e.message); + return reject(null); + } + if(response?.ok && response?.status === 200) { + return resolve(url); + } + console.warn('Reject because of response:', response?.ok, response?.status); + return reject(null); + }); + } +} +/* --- streamup --- */ + +/** + * @name StreamUp Extractor + * @author Cufiy + */ +async function streamupExtractor(data, url = null) { + // if url ends with /, remove it + if (url.endsWith("/")) { + url = url.slice(0, -1); + } + // split the url by / and get the last part + const urlParts = url.split("/"); + const videoId = 
urlParts[urlParts.length - 1]; + const apiUrl = `https://strmup.to/ajax/stream?filecode=${videoId}`; + const response = await soraFetch(apiUrl); + const jsonData = await response.json(); + if (jsonData && jsonData.streaming_url) { + return jsonData.streaming_url; + } else { + console.log("No streaming URL found in the response."); + return null; + } +} +/* --- supervideo --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name SuperVideo Extractor + * @author 50/50 + */ +async function supervideoExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const regex = /file:\s*"([^"]+\.m3u8)"/; + const match = regex.exec(unpackedScript); + if (match) { + const fileUrl = match[1]; + console.log("File URL:" + fileUrl); + return fileUrl; + } + return "No stream found"; +} + +/* --- uploadcx --- */ + +/** + * @name UploadCx Extractor + * @author 50/50 + */ +async function uploadcxExtractor(data, url = null) { + const mp4Match = /sources:\s*\["([^"]+\.mp4)"]/i.exec(data); + return mp4Match ? mp4Match[1] : null; +} +/* --- uqload --- */ + +/** + * @name uqloadExtractor + * @author scigward + */ +async function uqloadExtractor(html, embedUrl) { + try { + const match = html.match(/sources:\s*\[\s*"([^"]+\.mp4)"\s*\]/); + const videoSrc = match ? match[1] : ""; + return videoSrc; + } catch (error) { + console.log("uqloadExtractor error:", error.message); + return null; + } +} +/* --- videospk --- */ + +/* {REQUIRED PLUGINS: unbaser} */ +/** + * @name videospkExtractor + * @author 50/50 + */ +async function videospkExtractor(data, url = null) { + const obfuscatedScript = data.match(/]*>\s*(eval\(function\(p,a,c,k,e,d.*?\)[\s\S]*?)<\/script>/); + const unpackedScript = unpack(obfuscatedScript[1]); + const streamMatch = unpackedScript.match(/["'](\/stream\/[^"']+)["']/); + const hlsLink = streamMatch ? streamMatch[1] : null; + return "https://videospk.xyz" + hlsLink; +} + /* --- vidmoly --- */ /** @@ -898,7 +1547,11 @@ async function soraFetch(url, options = { headers: {}, method: 'GET', body: null } } } - +/*********************************************************** + * UNPACKER MODULE + * Credit to GitHub user "mnsrulz" for Unpacker Node library + * https://github.com/mnsrulz/unpacker + ***********************************************************/ class Unbaser { constructor(base) { this.ALPHABET = { @@ -934,6 +1587,12 @@ class Unbaser { return ret; } } + +function detectUnbaser(source) { + /* Detects whether `source` is P.A.C.K.E.R. coded. */ + return source.replace(" ", "").startsWith("eval(function(p,a,c,k,e,"); +} + function unpack(source) { let { payload, symtab, radix, count } = _filterargs(source); if (count != symtab.length) {