Compare commits

...

31 commits

Author SHA1 Message Date
desu-bot ef375d1188 chore: update public repo 2025-12-17 13:39:15 +00:00
desu-bot 3be0947026 chore: update public repo 2025-10-30 12:03:47 +00:00
desu-bot 732192050a chore: update public repo 2025-10-25 04:16:32 +00:00
desu-bot 5fce1ce275 chore: update public repo 2025-09-21 19:26:47 +00:00
desu-bot a40bf6b472 chore: update public repo 2025-09-14 21:52:13 +00:00
desu-bot 6c1fe8a13c chore: update public repo 2025-08-14 09:21:11 +00:00
desu-bot 56472e5520 chore: update public repo 2025-08-06 17:30:02 +00:00
desu-bot 455ec08c01 chore: update public repo 2025-06-18 20:26:53 +00:00
desu-bot db2be56aa3 chore: update public repo 2025-05-14 09:48:24 +00:00
desu-bot c0956d52ea chore: update public repo 2025-05-14 09:39:22 +00:00
desu-bot 53398ff93d chore: update public repo 2025-05-11 22:29:57 +00:00
desu-bot 4645c0253f chore: update public repo 2025-05-10 21:06:34 +00:00
desu-bot 910d31c9ab chore: update public repo 2025-05-09 06:48:15 +00:00
desu-bot df138936b1 chore: update public repo 2025-05-09 06:46:50 +00:00
desu-bot cd9b06512f chore: update public repo 2025-04-27 22:31:46 +00:00
desu-bot 33adc5f4ed chore: update public repo 2025-04-09 04:20:08 +00:00
desu-bot 6cf10f04a5 chore: update public repo 2025-03-25 02:39:46 +00:00
desu-bot c3d03874e7 chore: update public repo 2025-03-14 10:14:10 +00:00
desu-bot 7e7001e63b chore: update public repo 2025-03-08 08:19:53 +00:00
desu-bot 1b72ac0bc4 chore: update public repo 2025-02-19 02:30:26 +00:00
desu-bot 2bb69565a3 chore: update public repo 2025-02-05 09:12:00 +00:00
desu-bot a1b29f7fad chore: update public repo 2025-02-03 14:42:22 +00:00
desu-bot 16a1237fdf chore: update public repo 2025-01-29 15:09:10 +00:00
desu-bot 176d74e009 chore: update public repo 2025-01-29 14:37:00 +00:00
desu-bot 26bd4280af chore: update public repo 2025-01-24 09:56:11 +00:00
desu-bot c974b06523 chore: update public repo 2025-01-18 07:55:22 +00:00
desu-bot f2d62f5954 chore: update public repo 2025-01-18 04:01:14 +00:00
desu-bot 89e10fc021 chore: update public repo 2025-01-16 03:25:20 +00:00
teidesu ae327488d4 Update README.md 2025-01-14 05:42:32 +03:00
desu-bot e75d7f7f29 chore: update public repo 2025-01-14 02:38:00 +00:00
desu-bot 7eaf8df8f0 chore: update public repo 2025-01-14 02:37:26 +00:00
53 changed files with 10673 additions and 1 deletion

13
.gitignore vendored Normal file

@@ -0,0 +1,13 @@
node_modules/
private/
.nyc_output/
**/.DS_Store
.idea
.vscode
*.log
/assets
coverage
.rollup.cache
*.tsbuildinfo
.env

26
LICENSE Normal file

@@ -0,0 +1,26 @@
# DON'T BE A DICK PUBLIC LICENSE
> Version 1.1, December 2016
> Copyright (C) 2024 alina sireneva
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document.
> DON'T BE A DICK PUBLIC LICENSE
> TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
1. Do whatever you like with the original work, just don't be a dick.
Being a dick includes - but is not limited to - the following instances:
1a. Outright copyright infringement - Don't just copy this and change the name.
1b. Selling the unmodified original with no work done what-so-ever, that's REALLY being a dick.
1c. Modifying the original work to contain hidden harmful content. That would make you a PROPER dick.
2. If you become rich through modifications, related works/services, or supporting the original work,
share the love. Only a dick would make loads off this work and not buy the original work's
creator(s) a pint.
3. Code is provided with no warranty. Using somebody else's code and bitching when it goes wrong makes
you a DONKEY dick. Fix the problem yourself. A non-dick would submit the fix back.

1
README

@@ -1 +0,0 @@
meow :3

13
README.md Normal file

@@ -0,0 +1,13 @@
## teidesu/scripts
hewwo so this repo is a collection of some scripts i wrote over time that might be interesting to someone other than me
feel free to use the code in this repo in any way you want
> note: this repo is auto-generated from a private one, because there is some stuff i am legally not able to share
>
> if this results in some files being missing and some scripts being un-runnable, please do hmu!
### license
this repo is licensed under [DON'T BE A DICK PUBLIC LICENSE](https://github.com/philsturgeon/dbad)

27
eslint.config.js Normal file

@@ -0,0 +1,27 @@
import antfu from '@antfu/eslint-config'
export default antfu({
ignores: [
'assets/',
'node_modules/',
'dist/',
],
typescript: true,
gitignore: false,
rules: {
'curly': ['error', 'multi-line'],
'style/brace-style': ['error', '1tbs', { allowSingleLine: true }],
'n/prefer-global/buffer': 'off',
'no-restricted-globals': ['error', 'Buffer', '__dirname', 'require'],
'style/quotes': ['error', 'single', { avoidEscape: true }],
'test/consistent-test-it': 'off',
'test/prefer-lowercase-title': 'off',
'antfu/if-newline': 'off',
'style/max-statements-per-line': ['error', { max: 2 }],
'ts/no-redeclare': 'off',
'no-alert': 'off',
'no-console': 'off',
'node/prefer-global/process': 'off',
'unused-imports/no-unused-vars': 'off',
},
})

59
package.json Normal file

@@ -0,0 +1,59 @@
{
"name": "teidesu-scripts",
"type": "module",
"packageManager": "pnpm@9.5.0",
"dependencies": {
"@faker-js/faker": "^9.3.0",
"@fuman/io": "^0.0.4",
"@fuman/net": "^0.0.9",
"@fuman/node": "^0.0.4",
"@mtcute/node": "^0.19.1",
"@types/better-sqlite3": "^7.6.12",
"@types/plist": "^3.0.5",
"@types/spinnies": "^0.5.3",
"babel-generator": "^6.26.1",
"babel-traverse": "^6.26.0",
"babylon": "^6.18.0",
"better-sqlite3": "^11.8.1",
"canvas": "^3.1.0",
"cheerio": "^1.0.0",
"egoroof-blowfish": "4.0.1",
"es-main": "^1.3.0",
"filesize": "^10.1.6",
"imapflow": "^1.0.193",
"json5": "^2.2.3",
"kuromoji": "^0.1.2",
"mailparser": "^3.7.4",
"nanoid": "^5.0.9",
"node-libcurl-ja3": "^5.0.3",
"patchright": "^1.52.5",
"plist": "^3.1.0",
"qrcode-terminal": "^0.12.0",
"spinnies": "^0.5.1",
"tough-cookie": "^5.0.0",
"tough-cookie-file-store": "^2.0.3",
"ts-morph": "^26.0.0",
"tsx": "^4.19.2",
"undici": "^7.2.0",
"wanakana": "^5.3.1"
},
"devDependencies": {
"@antfu/eslint-config": "3.10.0",
"@fuman/fetch": "0.1.0",
"@fuman/utils": "0.0.14",
"@types/mailparser": "^3.4.6",
"@types/node": "22.10.0",
"domhandler": "^5.0.3",
"dotenv": "16.4.5",
"htmlparser2": "^10.0.0",
"zod": "3.23.8",
"zx": "8.2.2"
},
"pnpm": {
"onlyBuiltDependencies": [
"better-sqlite3",
"node-libcurl-ja3",
"canvas"
]
}
}

5840
pnpm-lock.yaml generated Normal file

File diff suppressed because it is too large


@@ -0,0 +1,23 @@
import qrTerminal from 'qrcode-terminal'
import { createTg } from '../../utils/telegram.ts'
const sessionName = process.argv[2]
if (!sessionName) {
console.error('Usage: mtcute-login.ts <session name>')
process.exit(1)
}
const tg = createTg(sessionName)
await tg.prepare()
await tg.storage.clear(true)
const self = await tg.start({
qrCodeHandler(url, expires) {
// qrcode-terminal prints directly to stdout and returns undefined, so don't console.log the result
qrTerminal.generate(url, { small: true })
},
})
console.log(`Logged in as ${self.displayName} (${self.id})`)
await tg.close()
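
utils/telegram.ts is not part of this diff, so createTg has to be inferred from its usage above. A minimal sketch of what it might look like, assuming mtcute's documented TelegramClient options; the env variable names and session path below are made up for illustration:

// hypothetical reconstruction of utils/telegram.ts — env names and storage
// location are assumptions, not taken from the actual repo
import { TelegramClient } from '@mtcute/node'
import { getEnv } from './misc.ts'

export function createTg(sessionName: string) {
  return new TelegramClient({
    apiId: Number(getEnv('TELEGRAM_API_ID')),
    apiHash: getEnv('TELEGRAM_API_HASH'),
    storage: `private/${sessionName}.session`,
  })
}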


@@ -0,0 +1,42 @@
import { asyncPool } from '@fuman/utils'
import { join } from 'node:path/posix'
import { $, ProcessOutput } from 'zx'
import { fetchSongsIter } from '../../../utils/navidrome.ts'
// async function checkIfBroken(path: string) {
// const r = await $`ffprobe -v error -show_entries stream=codec_type,codec_name,index:stream_tags=title,language -of json ${path}`.json()
// }
// for await (const song of fetchSongsIter()) {
// }
const broken: string[] = []
await asyncPool(fetchSongsIter({
onChunkProcessed: (page, items) => {
console.log(`Processed page ${page} with ${items} items`)
},
}), async (song) => {
const fullPath = join(song.libraryPath, song.path)
const path = fullPath.replace('/music/s3/', '/mnt/tank/enc/media/music/')
try {
const r = await $`ffmpeg -v error -i ${path} -f null -`.quiet()
if (r.exitCode !== 0 || r.stderr.trim() !== '') throw r
} catch (e) {
if (!(e instanceof ProcessOutput)) throw e
console.log('%s - %s (%s) seems broken:', song.artist, song.title, path)
console.log(e.stderr)
broken.push(path)
}
}, { limit: 8 })
if (broken.length > 0) {
console.log('Found %d broken files:', broken.length)
for (const path of broken.sort()) {
console.log(' %s', path)
}
process.exit(1)
}
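
utils/navidrome.ts is also missing from the diff. Judging by the call sites in these scripts, fetchSongs fetches one page of songs and fetchSongsIter lazily walks all pages; a rough sketch, where the /api/song endpoint, the _start/_end pagination params, the auth header and the NAVIDROME_ENDPOINT / NAVIDROME_TOKEN env names are all assumptions based on navidrome's native REST API:

// hypothetical reconstruction of utils/navidrome.ts; only the fields these
// scripts actually touch are typed
import { getEnv } from './misc.ts'

export interface NavidromeSong {
  id: string
  title: string
  artist: string
  albumArtist: string
  album: string
  path: string
  libraryPath: string
  size: number
  duration: number
  participants?: { artist?: unknown[] }
  [key: string]: unknown
}

export async function fetchSongs(offset: number, limit: number): Promise<NavidromeSong[]> {
  const res = await fetch(`${getEnv('NAVIDROME_ENDPOINT')}/api/song?_start=${offset}&_end=${offset + limit}`, {
    headers: { 'x-nd-authorization': `Bearer ${getEnv('NAVIDROME_TOKEN')}` },
  })
  if (!res.ok) throw new Error(`navidrome returned ${res.status}`)
  return await res.json() as NavidromeSong[]
}

export async function* fetchSongsIter(params?: {
  onChunkProcessed?: (page: number, items: number) => void
}): AsyncIterableIterator<NavidromeSong> {
  const CHUNK_SIZE = 1000
  for (let page = 0; ; page++) {
    const songs = await fetchSongs(page * CHUNK_SIZE, CHUNK_SIZE)
    if (songs.length === 0) return
    yield* songs
    params?.onChunkProcessed?.(page, songs.length)
  }
}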


@@ -0,0 +1,100 @@
import type { NavidromeSong } from '../../../utils/navidrome.ts'
import { createRequire } from 'node:module'
import { join } from 'node:path'
import kuromoji from 'kuromoji'
import { isKana, toRomaji } from 'wanakana'
import { fetchSongsIter } from '../../../utils/navidrome.ts'
const WHITELIST_KEYS = new Set([
// actual different tracks with the same title
'["sorry about my face","untitled track"]',
'["kooeetekumogeemusu","neko bushou sengoku emaki"]',
'["eve","merufuakutorii"]',
// todo
'["arm","legend of zelda"]',
'["arm","tomorrow heart beat ~ ashita anata ni dokkidoki☆ ~"]',
'["dwat","rotladatormarf"]',
'["fujiwara mari sai","zenbuatashinokawaiino"]',
])
const moji = await new Promise<any>((resolve, reject) => {
kuromoji.builder({
dicPath: join(createRequire(import.meta.url).resolve('kuromoji/'), '../../dict'),
}).build((err, tokenizer) => {
if (err) return reject(err)
resolve(tokenizer)
})
})
function clean(s: string) {
const str = s.toLowerCase()
.replace(/\(Explicit\)/i, '')
.replace(/[!@#$%^&*()_+=[\]{}\\|/,.;':"<>`~-]/g, '')
if (str.match(/[\u3000-\u303F\u3040-\u309F\u30A0-\u30FF\uFF00-\uFF9F\u4E00-\u9FAF\u3400-\u4DBF]/)) {
// has japanese
const tokens = moji.tokenize(str)
let res = ''
for (const token of tokens) {
if (token.word_type === 'UNKNOWN') {
res += isKana(token.surface_form) ? toRomaji(token.surface_form) : token.surface_form
} else if (token.word_type === 'KNOWN') {
res += `${toRomaji(token.reading)} `
}
}
return res.trimEnd()
}
return str
}
function getSongKey(song: NavidromeSong) {
return JSON.stringify([
clean(song.artist),
clean(song.title),
])
}
const seen = new Map<string, NavidromeSong[]>()
for await (const song of fetchSongsIter({
onChunkProcessed: (page, items) => {
console.log('⌛ fetched chunk %d (%d items)', page, items)
},
})) {
const key = getSongKey(song)
if (WHITELIST_KEYS.has(key)) continue
let arr = seen.get(key)
if (!arr) {
arr = []
seen.set(key, arr)
}
arr.push(song)
}
const keysSorted = Array.from(seen.keys()).sort()
let duplicates = 0
for (const key of keysSorted) {
const arr = seen.get(key)!
if (arr.length === 1) continue
duplicates += 1
console.log()
console.log('found duplicates for %s:', key)
for (const song of arr) {
console.log(' %s - %s (from %s - %s) (at %s)', song.artist, song.title, song.albumArtist, song.album, song.path)
}
}
if (duplicates === 0) {
console.log('✅ no duplicates found')
} else {
console.log('🚨 %d duplicates found', duplicates)
}


@@ -0,0 +1,22 @@
import { fetchSongsIter } from '../../../utils/navidrome.ts'
const IGNORE_PATHS = [
's3/Electronic/_Compilations/keygenjukebox/',
]
let count = 0
for await (const song of fetchSongsIter()) {
if (IGNORE_PATHS.some(path => song.path.startsWith(path))) {
continue
}
for (const field of ['mbzRecordingID', 'mbzReleaseTrackId', 'mbzAlbumId', 'mbzReleaseGroupId']) {
if (!song[field]) {
console.log('found missing %s: %s - %s (%s)', field, song.artist, song.title, song.path)
count++
break
}
}
}
console.log('found %d tracks without mbz ids', count)


@@ -0,0 +1,21 @@
import { fetchSongsIter } from '../../../utils/navidrome.ts'
const WHITELIST_ARTISTS = new Set([
'betwixt & between',
'10th avenue cafe/tak',
'overmind and potatoes',
])
let count = 0
for await (const song of fetchSongsIter()) {
if (
(!song.participants?.artist || song.participants.artist.length === 1)
&& song.artist.match(/, | and | & |\/| x | feat\. /i)
&& !WHITELIST_ARTISTS.has(song.artist.toLowerCase())
) {
console.log('possible multiartist: %s - %s (%s)', song.artist, song.title, song.path)
count++
}
}
console.log('found %d possible multiartists', count)


@@ -0,0 +1,66 @@
import { readFile, rm } from 'node:fs/promises'
import { join } from 'node:path'
import { $ } from 'zx'
import { downloadStream } from '../../../utils/fetch.ts'
import { getEnv } from '../../../utils/misc.ts'
import { fetchSongs } from '../../../utils/navidrome.ts'
import { WebdavClient } from '../../../utils/webdav.ts'
const webdav = new WebdavClient({
baseUrl: getEnv('NAVIDROME_WEBDAV_ENDPOINT'),
username: getEnv('NAVIDROME_WEBDAV_USERNAME'),
password: getEnv('NAVIDROME_WEBDAV_PASSWORD'),
})
const CHUNK_SIZE = 1000
for (let offset = 0; ; offset += CHUNK_SIZE) {
const songs = await fetchSongs(offset, CHUNK_SIZE)
if (songs.length === 0) break
for (const song of songs) {
const ext = song.path.split('.').pop()!
if (ext !== 'm4a') continue
console.log('❌ song %s is m4a, remuxing...', song.path)
const webdavPath = song.path.replace('/music/s3/', '/')
const res = await webdav.get(webdavPath).catch(() => null)
if (!res) {
console.log(' ❌ failed to get %s', webdavPath)
continue
}
const tmpfile = join('assets', `${song.id}.m4a`)
await downloadStream(res.body!, tmpfile)
console.log(' - downloaded to %s', tmpfile)
const probe = await $`ffprobe -v error -show_entries stream=codec_type,codec_name,index:stream_tags=title,language -of json ${tmpfile}`.json()
const audioStream = probe.streams.find(stream => stream.codec_type === 'audio')
if (!audioStream) {
console.log(' ❌ no audio stream found')
await rm(tmpfile)
continue
}
const codec = audioStream.codec_name
if (codec !== 'flac') {
console.log(` ❌ audio stream is ${codec}, not flac, skipping`)
await rm(tmpfile)
continue
}
console.log(' - audio stream is flac, remuxing')
// remux
const remuxed = join('assets', `${song.id}.flac`)
await rm(remuxed, { force: true })
await $`ffmpeg -i ${tmpfile} -c:a copy ${remuxed}`.quiet(true)
console.log(' - remuxed to %s', remuxed)
await rm(tmpfile)
await webdav.put(webdavPath.replace('.m4a', '.flac'), await readFile(remuxed))
await webdav.delete(webdavPath)
console.log(' - uploaded to %s', webdavPath.replace('.m4a', '.flac'))
await rm(remuxed)
}
}
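
utils/webdav.ts is another helper the diff doesn't include. From the get/put/delete calls above, a bare-bones sketch over fetch with basic auth could look like the following; it assumes @fuman/utils exposes utf8.encoder alongside the utf8.decoder used elsewhere in this diff, and the real client likely does more (error handling, PROPFIND, etc.):

// hypothetical reconstruction of utils/webdav.ts, inferred from usage above
import { base64, utf8 } from '@fuman/utils'

export class WebdavClient {
  #auth: string
  constructor(private opts: { baseUrl: string, username: string, password: string }) {
    // basic auth header; base64 helper used instead of the Buffer global,
    // which this repo's eslint config forbids
    this.#auth = `Basic ${base64.encode(utf8.encoder.encode(`${opts.username}:${opts.password}`))}`
  }

  get(path: string): Promise<Response> {
    return fetch(new URL(path, this.opts.baseUrl), { headers: { authorization: this.#auth } })
  }

  async put(path: string, body: Uint8Array): Promise<void> {
    await fetch(new URL(path, this.opts.baseUrl), { method: 'PUT', headers: { authorization: this.#auth }, body })
  }

  async delete(path: string): Promise<void> {
    await fetch(new URL(path, this.opts.baseUrl), { method: 'DELETE', headers: { authorization: this.#auth } })
  }
}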


@@ -0,0 +1,18 @@
import { fetchSongsIter } from '../../../utils/navidrome.ts'
let count = 0
let totalSize = 0
let totalDuration = 0
console.log('⌛ fetching songs...')
for await (const song of fetchSongsIter()) {
count += 1
totalSize += song.size
totalDuration += song.duration
}
console.log('---')
console.log('total songs: %d', count)
console.log('total size: %d GiB', (totalSize / 1024 / 1024 / 1024).toFixed(3))
console.log('total duration: %d min (%d h)', (totalDuration / 60).toFixed(3), (totalDuration / 60 / 60).toFixed(3))


@@ -0,0 +1,39 @@
import { filesize } from 'filesize'
import { z } from 'zod'
import { ffetch } from '../../utils/fetch.ts'
import { getEnv } from '../../utils/misc.ts'
const res = await ffetch('/api/v0/transfers/uploads', {
baseUrl: getEnv('SLSKD_ENDPOINT'),
headers: {
cookie: getEnv('SLSKD_COOKIE'),
},
}).parsedJson(z.array(
z.object({
username: z.string(),
directories: z.array(z.object({
directory: z.string(),
fileCount: z.number(),
files: z.array(z.object({
id: z.string(),
filename: z.string(),
state: z.string(),
bytesTransferred: z.number(),
})),
})),
}),
))
let total = 0
for (const user of res) {
for (const dir of user.directories) {
for (const file of dir.files) {
if (file.state !== 'Completed, Succeeded') continue
total += file.bytesTransferred
}
}
}
console.log(filesize(total))
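
getEnv (used in several scripts here) and chunks (used by soundcloud-dl below) live in utils/misc.ts, which is not shown; both are small enough to reconstruct from usage, with no claim to match the originals:

// hypothetical reconstruction of two helpers from utils/misc.ts
export function getEnv(name: string): string {
  const value = process.env[name]
  if (!value) throw new Error(`missing environment variable: ${name}`)
  return value
}

// split an array into chunks of at most `size` items
export function* chunks<T>(items: T[], size: number): Generator<T[]> {
  for (let i = 0; i < items.length; i += size) {
    yield items.slice(i, i + size)
  }
}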


@@ -0,0 +1,58 @@
import { iter } from '@fuman/utils'
import { z } from 'zod'
import { minimist, question } from 'zx'
import { downloadFile, ffetch } from '../../utils/fetch.ts'
const args = minimist(process.argv.slice(2), {
string: ['filename'],
})
const query = args._[0] ?? await question('Search query (Artist - Album): ')
const data = await ffetch('https://api.deezer.com/search', {
query: {
q: query,
limit: 15,
},
}).parsedJson(z.object({
data: z.array(z.object({
type: z.literal('track'),
title: z.string(),
artist: z.object({
name: z.string(),
}),
album: z.object({
id: z.number(),
title: z.string(),
cover_xl: z.string(),
}),
})),
}))
const groupedByAlbum = new Map<number, typeof data['data']>()
for (const result of data.data) {
const albumId = result.album.id
if (!groupedByAlbum.has(albumId)) {
groupedByAlbum.set(albumId, [])
}
groupedByAlbum.get(albumId)!.push(result)
}
const idxToAlbum = new Map<number, number>()
for (const [idx, [id, tracks]] of iter.enumerate(groupedByAlbum.entries())) {
idxToAlbum.set(idx, id)
console.log(`${idx + 1}. ${tracks[0].artist.name} - ${tracks[0].album.title}`)
for (const track of tracks) {
console.log(` ${track.title}`)
}
}
console.log('Enter number to download album art:')
const number = Number.parseInt(await question('[1] > ') || '1')
const artworkUrl = groupedByAlbum.get(idxToAlbum.get(number - 1)!)![0].album.cover_xl
await downloadFile(artworkUrl, args.filename ?? `assets/${query.replace(/\s/g, '_')}.jpg`)
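
downloadFile and downloadStream come from utils/fetch.ts, also absent from the diff. Judging by the call sites (downloadFile(url, destPath), downloadStream(res.body, destPath)), a plausible sketch is a thin wrapper over fetch plus node stream piping:

// hypothetical reconstruction of the download helpers from utils/fetch.ts
import { createWriteStream } from 'node:fs'
import { Readable } from 'node:stream'
import { pipeline } from 'node:stream/promises'

export async function downloadStream(stream: ReadableStream<Uint8Array>, dest: string): Promise<void> {
  // `as any` bridges the DOM vs node web-stream type mismatch
  await pipeline(Readable.fromWeb(stream as any), createWriteStream(dest))
}

export async function downloadFile(url: string, dest: string): Promise<void> {
  const res = await fetch(url)
  if (!res.ok || !res.body) throw new Error(`failed to fetch ${url}: ${res.status}`)
  await downloadStream(res.body, dest)
}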

859
scripts/media/deezer-dl.ts Normal file

File diff suppressed because one or more lines are too long


@@ -0,0 +1,129 @@
import { rm } from 'node:fs/promises'
import { $, question } from 'zx'
import { fileExists } from '../../utils/fs.ts'
let filename = await question('filename >')
const startTs = await question('start timestamp >')
const endTs = await question('end timestamp >')
const outputFilename = await question('output filename [assets/output.mp4] >') || 'assets/output.mp4'
if (filename[0] === '\'' && filename[filename.length - 1] === '\'') {
filename = filename.slice(1, -1)
}
const ffprobe = await $`ffprobe -v error -show_entries stream=codec_type,codec_name,index:stream_tags=title,language -of json ${filename}`.json()
async function chooseStream(type: string, options: any[], allowNone = false) {
console.log(`Found ${type} streams:`)
for (let i = 0; i < options.length; i++) {
const stream = options[i]
console.log(`[${i + 1}] (${stream.codec_name}, ${stream.tags?.language ?? '?'}) ${stream.tags?.title ?? ''}`)
}
if (allowNone) {
console.log(`[0] No ${type}`)
}
const res = await question(`select ${type} >`) || '0'
if (res === '0' && allowNone) {
return null
}
const streamIndex = Number.parseInt(res)
if (Number.isNaN(streamIndex) || streamIndex < 1 || streamIndex > options.length) {
console.error('Invalid input')
process.exit(1)
}
return streamIndex - 1
}
const allVideos = ffprobe.streams.filter(stream => stream.codec_type === 'video')
const allAudios = ffprobe.streams.filter(stream => stream.codec_type === 'audio')
const allSubtitles = ffprobe.streams.filter(stream => stream.codec_type === 'subtitle')
let videoStream: number | null = null
let audioStream: number | null = null
let subtitleStream: number | null = null
if (allVideos.length > 1) {
videoStream = await chooseStream('video', allVideos)
} else if (allVideos.length > 0) {
videoStream = 0
} else {
console.error('No video streams found')
process.exit(1)
}
if (allAudios.length > 1) {
audioStream = await chooseStream('audio', allAudios)
} else if (allAudios.length > 0) {
audioStream = 0
} else {
console.warn('No audio streams found, proceeding without audio')
}
if (allSubtitles.length > 0) {
subtitleStream = await chooseStream('subtitle', allSubtitles, true)
}
const args: string[] = [
'-i',
filename,
'-map',
`0:v:${videoStream}`,
'-c:v',
'libx264',
]
if (audioStream !== null) {
args.push('-map', `0:a:${audioStream}`)
}
if (subtitleStream !== null) {
const filenameEscaped = filename.replace(/'/g, "'\\\\\\''")
args.push('-vf', `format=yuv420p,subtitles='${filenameEscaped}':si=${subtitleStream}`)
} else {
args.push('-vf', 'format=yuv420p')
}
if (audioStream !== null) {
args.push('-c:a', 'libopus')
if (allAudios[audioStream].codec_name === 'flac') {
args.push('-b:a', '320k')
}
}
args.push(
'-ss',
startTs!,
'-to',
endTs!,
outputFilename,
)
if (await fileExists(outputFilename)) {
const overwrite = await question('Output file already exists, overwrite? [y/N] >')
if (overwrite?.toLowerCase() !== 'y') {
process.exit(0)
}
await rm(outputFilename)
}
try {
$.env.AV_LOG_FORCE_COLOR = 'true'
await $`ffmpeg ${args}`
} catch (e) {
process.exit(1)
}
const openDir = await question('open output directory? [Y/n] >')
if (!openDir || openDir?.toLowerCase() === 'y') {
await $`open -R ${outputFilename}`
}
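
fileExists from utils/fs.ts is easy to infer from its usage here and in the fwmc-radio script below; a likely sketch:

// hypothetical reconstruction of fileExists from utils/fs.ts
import { stat } from 'node:fs/promises'

export async function fileExists(path: string): Promise<boolean> {
  try {
    await stat(path)
    return true
  } catch {
    return false
  }
}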


@@ -0,0 +1,40 @@
import { mkdir } from 'node:fs/promises'
import { asyncPool } from '@fuman/utils'
import json5 from 'json5'
import Spinnies from 'spinnies'
import { z } from 'zod'
import { downloadFile, ffetch } from '../../utils/fetch.ts'
import { fileExists } from '../../utils/fs.ts'
import { parseJsObject } from '../../utils/strings.ts'
const $ = await ffetch('https://fwmc-ai.github.io/radio/').cheerio()
const script = $('script:icontains(const playlist =)').html()!
const playlistJs = parseJsObject(`[${script.split('const playlist = [').at(-1)!}`)!
const playlist = z.array(
z.object({
id: z.string(),
title: z.string(),
file: z.string(),
cover: z.string(),
category: z.enum(['original', 'cover']),
lyrics: z.string(),
}),
).parse(json5.parse(playlistJs))
const spinnies = new Spinnies()
await mkdir('assets/fwmc-radio', { recursive: true })
await asyncPool(playlist, async (item) => {
const dlPath = `assets/fwmc-radio/${item.id}.mp3`
if (await fileExists(dlPath)) return
spinnies.add(item.id, { text: item.title })
await downloadFile(new URL(item.file, 'https://fwmc-ai.github.io/radio/').toString(), dlPath)
spinnies.remove(item.id)
})
console.log('done')
spinnies.stopAll()
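
parseJsObject from utils/strings.ts is not shown either. From the call above, it takes a string beginning with a js object/array literal and returns the balanced literal prefix, which then goes through json5. A simplified sketch that tracks bracket depth while skipping quoted strings (the real helper may handle comments and more):

// hypothetical reconstruction of parseJsObject from utils/strings.ts:
// returns the balanced object/array literal at the start of `str`, or null
export function parseJsObject(str: string): string | null {
  if (str[0] !== '{' && str[0] !== '[') return null
  let depth = 0
  let inString: string | null = null
  for (let i = 0; i < str.length; i++) {
    const c = str[i]
    if (inString) {
      if (c === '\\') i++ // skip escaped character
      else if (c === inString) inString = null
    } else if (c === '"' || c === '\'' || c === '`') {
      inString = c
    } else if (c === '{' || c === '[') {
      depth++
    } else if (c === '}' || c === ']') {
      depth--
      if (depth === 0) return str.slice(0, i + 1)
    }
  }
  return null
}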


@@ -0,0 +1,46 @@
import { iter } from '@fuman/utils'
import { z } from 'zod'
import { minimist, question } from 'zx'
import { downloadFile, ffetch } from '../../utils/fetch.ts'
const args = minimist(process.argv.slice(2), {
string: ['entity', 'filename'],
})
const entity = args.entity ?? 'album'
const query = args._[0] ?? await question('Search query (Artist - Album): ')
const data = await ffetch('https://itunes.apple.com/search', {
query: {
term: query,
entity,
limit: 15,
},
}).parsedJson(z.object({
results: z.array(z.object({
kind: z.literal('song').optional(),
artistName: z.string(),
collectionName: z.string(),
artworkUrl100: z.string(),
releaseDate: z.string(),
trackName: z.string().optional(),
}).passthrough()),
}))
for (const [i, result] of iter.enumerate(data.results)) {
if (result.kind === 'song') {
console.log(`${i + 1}. ${result.artistName} - ${result.trackName} (${result.collectionName}, ${new Date(result.releaseDate).toLocaleDateString('ru-RU')})`)
continue
}
console.log(`${i + 1}. ${result.artistName} - ${result.collectionName} (${new Date(result.releaseDate).toLocaleDateString('ru-RU')})`)
}
console.log('Enter number to download album art:')
const number = Number.parseInt(await question('[1] > ') || '1')
const artworkUrl = data.results[number - 1].artworkUrl100.replace('100x100', '1500x1500')
await downloadFile(artworkUrl, args.filename ?? `assets/${query.replace(/\s/g, '_')}.jpg`)


@@ -0,0 +1,63 @@
import { iter } from '@fuman/utils'
import { z } from 'zod'
import { minimist, question } from 'zx'
import { downloadFile, ffetch } from '../../utils/fetch.ts'
const args = minimist(process.argv.slice(2), {
string: ['filename'],
})
const query = args._[0] ?? await question('Search query: ')
const data = await ffetch('https://itunes.apple.com/search', {
query: {
term: query,
entity: 'musicArtist',
limit: 15,
},
}).parsedJson(z.object({
results: z.array(z.object({
wrapperType: z.literal('artist'),
artistName: z.string(),
artistLinkUrl: z.string(),
primaryGenreName: z.string().default('Unknown'),
}).passthrough()),
}))
for (const [i, result] of iter.enumerate(data.results)) {
console.log(`${i + 1}. ${result.artistName} (${result.primaryGenreName})`)
}
console.log('Enter number to download artist art:')
const number = Number.parseInt(await question('[1] > ') || '1')
const pageUrl = data.results[number - 1].artistLinkUrl
const $ = await ffetch(pageUrl).cheerio()
const pageData = JSON.parse($('#serialized-server-data').html()!)
const pageDataValidated = z.tuple([
z.object({
data: z.object({
seoData: z.object({
artworkUrl: z.string(),
}),
}),
}),
]).parse(pageData)
// {w}x{h}{c}.{f}
const artworkUrl = pageDataValidated[0].data.seoData.artworkUrl
.replace('{w}', '2500')
.replace('{h}', '2500')
.replace('{c}', 'cc')
.replace('{f}', 'jpg')
if (artworkUrl === '/assets/meta/apple-music.png') {
console.log('No artwork available')
process.exit(1)
}
await downloadFile(artworkUrl, args.filename ?? `assets/${query.replace(/\s/g, '_')}.jpg`)


@@ -0,0 +1,60 @@
import { createReadStream } from 'node:fs'
import { nodeReadableToFuman } from '@fuman/node'
import Database from 'better-sqlite3'
import { question } from 'zx'
import { CsvReader } from '../../utils/csv.ts'
const csvPath = process.argv[2] ?? await question('path to csv > ')
// convert csv generated by https://mainstream.ghan.nl/export.html to an sqlite database
const db = new Database('assets/lastfm-import.db')
db.exec(`
CREATE TABLE IF NOT EXISTS scrobbles (
date_uts TEXT,
artist_mbid TEXT,
artist_name TEXT,
album_mbid TEXT,
album_name TEXT,
track_mbid TEXT,
track_name TEXT
);
`)
const insertQuery = db.prepare(`
INSERT INTO scrobbles (
date_uts,
artist_mbid,
artist_name,
album_mbid,
album_name,
track_mbid,
track_name
) VALUES (?, ?, ?, ?, ?, ?, ?)
`)
const file = nodeReadableToFuman(createReadStream(csvPath))
const csv = new CsvReader(file, {
schema: ['uts', 'utc_time', 'artist', 'artist_mbid', 'album', 'album_mbid', 'track', 'track_mbid'],
})
let i = 0
while (true) {
const obj = await csv.read()
if (!obj) break
i += 1
if (i % 1000 === 0) {
console.log('inserted', i)
}
insertQuery.run(
obj.uts,
obj.artist_mbid,
obj.artist,
obj.album_mbid,
obj.album,
obj.track_mbid,
obj.track,
)
}


@@ -0,0 +1,505 @@
import { mkdir, rm, writeFile } from 'node:fs/promises'
import { join } from 'node:path'
import { ffetchAddons } from '@fuman/fetch'
import { assert, asyncPool, base64, sleep } from '@fuman/utils'
import { load } from 'cheerio'
import Spinnies from 'spinnies'
import { z } from 'zod'
import { $, ProcessOutput, question } from 'zx'
import { downloadFile, ffetch as ffetchBase } from '../../utils/fetch.ts'
import { sanitizeFilename } from '../../utils/fs.ts'
import { chunks, getEnv } from '../../utils/misc.ts'
import { generateOpusImageBlob } from '../../utils/media-metadata.ts'
const ffetchApi = ffetchBase.extend({
baseUrl: 'https://api-v2.soundcloud.com',
query: {
client_id: '4BowhSywvkJtklODQDzjNMq9sK9wyDJ4',
app_version: '1738322252',
app_locale: 'en',
},
addons: [
ffetchAddons.rateLimitHandler(),
],
headers: {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
'Authorization': `OAuth ${getEnv('SOUNDCLOUD_TOKEN')}`,
},
})
const ffetchHtml = ffetchBase.extend({
baseUrl: 'https://soundcloud.com',
headers: {
Cookie: `oauth_token=${getEnv('SOUNDCLOUD_TOKEN')}`,
},
})
const ScTrack = z.object({
id: z.number(),
kind: z.literal('track'),
title: z.string(),
duration: z.number(),
description: z.string().nullable(),
permalink_url: z.string(),
artwork_url: z.string().transform(s => s.replace('-large.jpg', '-t500x500.jpg')).nullable(),
media: z.object({
transcodings: z.array(z.object({
url: z.string(),
preset: z.string(),
format: z.object({
protocol: z.string(),
mime_type: z.string(),
}),
quality: z.string(),
is_legacy_transcoding: z.boolean(),
})),
}),
track_authorization: z.string(),
user: z.object({
username: z.string(),
permalink: z.string(),
}),
})
type ScTrack = z.infer<typeof ScTrack>
const ScPlaylist = z.object({
id: z.number(),
title: z.string(),
duration: z.number(),
permalink_url: z.string(),
genre: z.string().nullish(),
description: z.string().nullish(),
track_count: z.number(),
user: z.object({
username: z.string(),
}),
tracks: z.array(z.union([
ScTrack,
z.object({
id: z.number(),
kind: z.literal('track'),
}),
])).default(() => []),
})
type ScPlaylist = z.infer<typeof ScPlaylist>
const ScUser = z.object({
id: z.number(),
kind: z.literal('user'),
permalink_url: z.string(),
username: z.string(),
likes_count: z.number(),
track_count: z.number(),
playlist_likes_count: z.number(),
})
type ScUser = z.infer<typeof ScUser>
const ScLike = z.object({
created_at: z.string(),
kind: z.literal('like'),
track: ScTrack.optional(),
playlist: ScPlaylist.optional(),
})
function extractHydrationData(html: string) {
const $ = load(html)
const script = $('script:contains(window.__sc_hydration = )')
return JSON.parse(script.html()!.replace('window.__sc_hydration = ', '').slice(0, -1))
}
async function fetchPlaylistByUrl(url: string) {
const html = await ffetchHtml(url).text()
const hydrationData = extractHydrationData(html)
const playlist = hydrationData.find(it => it.hydratable === 'playlist')
if (!playlist) throw new Error('no playlist found')
return ScPlaylist.parse(playlist.data)
}
async function fetchPlaylistById(id: number) {
return ffetchApi(`/playlists/${id}`, {
query: {
linked_partitioning: '1',
},
}).parsedJson(ScPlaylist)
}
async function fetchTracksById(trackIds: number[]) {
return ffetchApi('/tracks', {
query: {
ids: trackIds.join(','),
},
}).parsedJson(z.array(ScTrack))
}
async function downloadTrack(track: ScTrack, opts: {
/* download destination (filename without extension) */
destination: string
onRateLimit?: (waitTime: number) => void
onCdnRateLimit?: () => void
}) {
const artworkPath = join('assets', `sc-tmp-${track.id}.jpg`)
const artworkBytes = track.artwork_url ? new Uint8Array(await ffetchHtml(track.artwork_url).arrayBuffer()) : null
// find the best transcoding
let transcoding!: typeof track.media.transcodings[0]
for (const t of track.media.transcodings) {
if (t.quality === 'hq') {
transcoding = t
break
}
if (t.preset === 'opus_0_0') {
transcoding = t
break
}
transcoding = t
}
const { url: hlsUrl } = await ffetchApi(transcoding.url, {
query: {
track_authorization: track.track_authorization,
},
rateLimit: {
isRejected(res) {
return res.status === 429
},
defaultWaitTime: 60_000,
maxRetries: 10,
onRateLimitExceeded(res, waitTime) {
opts.onRateLimit?.(waitTime)
},
},
}).parsedJson(z.object({
url: z.string(),
}))
let ext = transcoding.format.mime_type.match(/^audio\/(\w+)(;|$)/)![1]
if (ext === 'mp4') ext = 'm4a'
const filename = `${opts.destination}.${ext}`
const params: string[] = [
'-y',
'-i',
hlsUrl,
]
if (artworkBytes) {
if (ext === 'mp3') {
await writeFile(artworkPath, artworkBytes)
params.push(
'-i',
artworkPath,
'-map',
'1:v:0',
'-id3v2_version',
'3',
'-metadata:s:v',
'title=Album cover',
'-metadata:s:v',
'comment=Cover (front)',
)
} else if (ext === 'ogg') {
const blob = base64.encode(await generateOpusImageBlob(artworkBytes))
params.push(
'-metadata',
`metadata_block_picture=${blob}`,
)
} else if (ext === 'm4a') {
await writeFile(artworkPath, artworkBytes)
params.push(
'-i',
artworkPath,
'-map',
'1',
'-disposition:v',
'attached_pic',
)
}
}
params.push(
'-map',
'0:a',
'-c',
'copy',
'-metadata',
`title=${track.title}`,
'-metadata',
`artist=${track.user.username}`,
'-metadata',
`comment=${track.description ?? ''}`,
filename,
)
while (true) {
try {
await $`ffmpeg ${params}`.quiet(true)
break
} catch (e) {
if (!(e instanceof ProcessOutput)) {
throw e
}
if (e.stderr.includes('429 Too Many Requests')) {
opts.onCdnRateLimit?.()
await sleep(10_000)
continue
}
throw e
}
}
await rm(artworkPath, { force: true })
}
async function downloadPlaylist(playlist: ScPlaylist, params: {
destination?: string
} = {}) {
const tracks: ScTrack[] = []
const tracksToFetch = new Set<number>()
const trackIdToPosition = new Map<number, number>()
for (let i = 0; i < playlist.tracks.length; i++) {
const track = playlist.tracks[i]
trackIdToPosition.set(track.id, i + 1)
if ('user' in track) {
tracks.push(track)
} else {
tracksToFetch.add(track.id)
}
}
const spinnies = new Spinnies()
if (tracksToFetch.size) {
let remaining = tracksToFetch.size
spinnies.add('fetching', { text: `fetching ${remaining} tracks` })
await asyncPool(chunks(Array.from(tracksToFetch), 20), async (ids) => {
const res = await fetchTracksById(Array.from(ids))
for (const track of res) {
tracks.push(track)
}
remaining -= ids.length
spinnies.update('fetching', { text: `fetching ${remaining} tracks` })
})
spinnies.succeed('fetching', { text: `fetched ${tracks.length} tracks` })
}
const destDir = params.destination ?? join('assets/soundcloud-dl', sanitizeFilename(`${playlist.user.username} - ${playlist.title}`))
await mkdir(destDir, { recursive: true })
const posPadSize = Math.ceil(Math.log10(tracks.length))
await asyncPool(tracks, async (track) => {
const position = trackIdToPosition.get(track.id)!
const filename = `${position.toString().padStart(posPadSize, '0')}. ${track.user.username} - ${track.title}`
spinnies.add(`${track.id}`, { text: filename })
await downloadTrack(track, {
destination: join(destDir, sanitizeFilename(filename)),
onRateLimit: (wait) => {
spinnies.update(`${track.id}`, { text: `[rate limit ${Math.floor(wait / 1000)}s] ${filename}` })
},
onCdnRateLimit: () => {
spinnies.update(`${track.id}`, { text: `[cdn rate limit] ${filename}` })
},
})
spinnies.remove(`${track.id}`)
})
console.log('done')
spinnies.stopAll()
}
async function downloadLikes(username: string) {
const spinnies = new Spinnies()
spinnies.add('collect', { text: 'collecting likes...' })
const userPage = await ffetchHtml(`/${username}`).text()
const hydrationData = extractHydrationData(userPage)
const user = hydrationData.find(it => it.hydratable === 'user')
if (!user) throw new Error('no user found')
const userData = ScUser.parse(user.data)
const tracks: ScTrack[] = []
const playlists: ScPlaylist[] = []
const updateSpinner = () => {
const percent = Math.floor((tracks.length + playlists.length) / (userData.likes_count + userData.playlist_likes_count) * 100)
spinnies.update('collect', {
text: `[${percent}%] collecting liked tracks: ${tracks.length}/${userData.likes_count}, playlists: ${playlists.length}/${userData.playlist_likes_count}`,
})
}
updateSpinner()
let offset = '0'
while (true) {
const res = await ffetchApi(`/users/${userData.id}/likes`, {
query: {
limit: 100,
offset,
linked_partitioning: '1',
},
}).parsedJson(z.object({
collection: z.array(ScLike),
next_href: z.string().nullable(),
}))
for (const like of res.collection) {
if (like.track) {
tracks.push(like.track)
} else if (like.playlist) {
playlists.push(like.playlist)
} else {
console.warn('unknown like type:', like.created_at)
}
}
updateSpinner()
if (!res.next_href) break
offset = new URL(res.next_href).searchParams.get('offset')!
}
spinnies.succeed('collect', { text: `collected ${tracks.length} tracks and ${playlists.length} playlists` })
spinnies.add('tracks', { text: 'downloading tracks...' })
let downloaded = 0
const updateTracksSpinner = () => {
spinnies.update('tracks', { text: `[${downloaded}/${tracks.length}] downloading tracks...` })
}
updateTracksSpinner()
const baseDir = join('assets/soundcloud-dl', `${sanitizeFilename(username)}-likes`)
await mkdir(baseDir, { recursive: true })
await asyncPool(tracks, async (track) => {
const filename = `${track.user.username} - ${track.title}`
spinnies.add(`${track.id}`, { text: filename })
await downloadTrack(track, {
destination: join(baseDir, sanitizeFilename(filename)),
onRateLimit: (wait) => {
spinnies.update(`${track.id}`, { text: `[rate limit ${Math.floor(wait / 1000)}s] ${filename}` })
},
onCdnRateLimit: () => {
spinnies.update(`${track.id}`, { text: `[cdn rate limit] ${filename}` })
},
})
spinnies.remove(`${track.id}`)
downloaded += 1
updateTracksSpinner()
})
spinnies.succeed('tracks', { text: `downloaded ${downloaded} tracks` })
spinnies.stopAll()
for (const playlist of playlists) {
console.log(`\uDB83\uDCB8 ${playlist.title}`)
const fullPlaylist = await fetchPlaylistById(playlist.id)
await downloadPlaylist(fullPlaylist, {
destination: join(baseDir, sanitizeFilename(`${playlist.user.username} - ${playlist.title}`)),
})
}
}
async function downloadUser(user: ScUser) {
const tracks: ScTrack[] = []
const spinnies = new Spinnies()
spinnies.add('collect')
const updateSpinner = () => {
const percent = Math.floor(tracks.length / user.track_count * 100)
spinnies.update('collect', {
text: `[${percent}%] collecting user tracks: ${tracks.length}/${user.track_count}`,
})
}
updateSpinner()
let offset = '0'
while (true) {
const res = await ffetchApi(`/users/${user.id}/tracks`, {
query: {
limit: 100,
offset,
linked_partitioning: '1',
},
}).parsedJson(z.object({
collection: z.array(ScTrack),
next_href: z.string().nullable(),
}))
for (const track of res.collection) {
tracks.push(track)
}
updateSpinner()
if (!res.next_href) break
offset = new URL(res.next_href).searchParams.get('offset')!
}
spinnies.succeed('collect', { text: `collected ${tracks.length} tracks` })
spinnies.add('tracks', { text: 'downloading tracks...' })
let downloaded = 0
const updateTracksSpinner = () => {
spinnies.update('tracks', { text: `[${downloaded}/${tracks.length}] downloading tracks...` })
}
updateTracksSpinner()
const baseDir = join('assets/soundcloud-dl', `${sanitizeFilename(user.username)}-tracks`)
await mkdir(baseDir, { recursive: true })
await asyncPool(tracks, async (track) => {
const filename = track.title
spinnies.add(`${track.id}`, { text: filename })
await downloadTrack(track, {
destination: join(baseDir, sanitizeFilename(filename)),
onRateLimit: (wait) => {
spinnies.update(`${track.id}`, { text: `[rate limit ${Math.floor(wait / 1000)}s] ${filename}` })
},
onCdnRateLimit: () => {
spinnies.update(`${track.id}`, { text: `[cdn rate limit] ${filename}` })
},
})
downloaded += 1
spinnies.remove(`${track.id}`)
updateTracksSpinner()
})
spinnies.succeed('tracks', { text: `downloaded ${downloaded} tracks` })
spinnies.stopAll()
}
const url = process.argv[2] ?? await question('url > ')
if (!url.startsWith('https://soundcloud.com/')) {
console.error('url must start with https://soundcloud.com/')
process.exit(1)
}
if (url.match(/^https:\/\/soundcloud.com\/[a-z0-9-]+\/sets\//i)) {
await downloadPlaylist(await fetchPlaylistByUrl(url))
} else if (url.match(/^https:\/\/soundcloud.com\/[a-z0-9-]+\/likes/i)) {
await downloadLikes(url.match(/^https:\/\/soundcloud.com\/([a-z0-9-]+)\/likes/i)![1])
} else {
const html = await ffetchHtml(url).text()
const hydrationData = extractHydrationData(html)
const trackData = hydrationData.find(it => it.hydratable === 'sound')
if (trackData) {
const track = ScTrack.parse(trackData.data)
const filename = `${track.user.username} - ${track.title}`
console.log('downloading track:', filename)
await downloadTrack(track, {
destination: join('assets/soundcloud-dl', sanitizeFilename(filename)),
})
} else {
const userData = hydrationData.find(it => it.hydratable === 'user')
if (userData) {
const user = ScUser.parse(userData.data)
await downloadUser(user)
}
}
}
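
sanitizeFilename from utils/fs.ts is used by all the downloaders in this diff; the exact character set it replaces is unknown, but presumably something along these lines:

// hypothetical reconstruction of sanitizeFilename from utils/fs.ts —
// the replacement character set is an assumption
export function sanitizeFilename(name: string): string {
  return name.replace(/[/\\:*?"<>|\0]/g, '_').trim()
}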

465
scripts/media/tidal-dl.ts Normal file

@@ -0,0 +1,465 @@
import { randomUUID } from 'node:crypto'
import { mkdir, rm, writeFile } from 'node:fs/promises'
import { dirname, join } from 'node:path'
import { asyncPool, AsyncQueue, base64, todo, unknownToError, utf8 } from '@fuman/utils'
import Spinnies from 'spinnies'
import { z } from 'zod'
import { $, question } from 'zx'
import { ffetch as ffetchBase } from '../../utils/fetch.ts'
import { sanitizeFilename } from '../../utils/fs.ts'
import { pipeIntoProc, runMetaflac } from '../../utils/media-metadata.ts'
import { getEnv } from '../../utils/misc.ts'
import { concatMpdSegments, parseSimpleMpd } from '../../utils/mpd.ts'
const oauthResponse = await ffetchBase('https://auth.tidal.com/v1/oauth2/token', {
form: {
client_id: '49YxDN9a2aFV6RTG',
grant_type: 'refresh_token',
scope: 'r_usr w_usr',
refresh_token: getEnv('TIDAL_REFRESH_TOKEN'),
},
}).parsedJson(z.object({
access_token: z.string(),
user: z.object({
username: z.string(),
countryCode: z.string(),
}),
}))
console.log('Logged in as %s', oauthResponse.user.username)
const ffetch = ffetchBase.extend({
headers: {
'accept': '*/*',
'Authorization': `Bearer ${oauthResponse.access_token}`,
'accept-language': 'en-US,en;q=0.5',
'accept-encoding': 'gzip, deflate, br',
'referer': 'https://tidal.com/',
'origin': 'https://tidal.com',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
},
// for some reason the request sometimes hangs indefinitely, so we need to timeout
timeout: 5000,
retry: {
maxRetries: 3,
// onError: (err, req) => {
// console.log('%s: error: %s', req.url, err)
// return true
// },
},
})
const PlaybackInfoResult = z.object({
albumPeakAmplitude: z.number(),
albumReplayGain: z.number(),
assetPresentation: z.string(),
audioMode: z.string(),
audioQuality: z.enum(['HIGH', 'LOSSLESS', 'HI_RES_LOSSLESS']),
bitDepth: z.number(),
manifest: z.string(),
manifestHash: z.string(),
manifestMimeType: z.literal('application/dash+xml'),
sampleRate: z.number(),
streamingSessionId: z.string(),
trackId: z.number(),
trackPeakAmplitude: z.number(),
trackReplayGain: z.number(),
})
const streamingSessionId = randomUUID()
const TidalTrack = z.object({
id: z.number(),
album: z.object({
id: z.number(),
cover: z.string(),
}),
artists: z.array(z.object({
id: z.number(),
name: z.string(),
})),
isrc: z.string().nullable(),
trackNumber: z.number(),
volumeNumber: z.number(),
title: z.string(),
copyright: z.string().nullable(),
version: z.string().nullable(),
bpm: z.number().nullable(),
})
type TidalTrack = z.infer<typeof TidalTrack>
function getTrackName(track: TidalTrack) {
let name = track.title
if (track.version) {
name += ` ${track.version}`
}
return name
}
function getTrackArtistString(track: TidalTrack | TidalAlbum) {
return track.artists.map(it => it.name).join(', ')
}
function getAlbumCoverUrl(uuid: string) {
return `https://resources.tidal.com/images/${uuid.replace(/-/g, '/')}/1280x1280.jpg`
}
const TidalAlbum = z.object({
id: z.number(),
title: z.string(),
cover: z.string(),
releaseDate: z.string(),
artists: z.array(z.object({
id: z.number(),
name: z.string(),
})),
})
type TidalAlbum = z.infer<typeof TidalAlbum>
const COMMON_QUERY = {
countryCode: oauthResponse.user.countryCode,
locale: 'en_US',
deviceType: 'BROWSER',
}
async function downloadTrack(options: {
track: TidalTrack
album: TidalAlbum
albumCoverPath?: string
destination: string
}) {
const { track, album, albumCoverPath, destination } = options
// fetch the three endpoints concurrently (sequential awaits inside an
// array literal would run them one by one)
const [playbackRes, lyricsRes, creditsRes] = await Promise.all([
ffetch(`https://tidal.com/v1/tracks/${track.id}/playbackinfo`, {
query: {
audioquality: 'HI_RES_LOSSLESS',
playbackmode: 'STREAM',
assetpresentation: 'FULL',
},
headers: {
'x-tidal-streamingsessionid': streamingSessionId,
'x-tidal-token': '49YxDN9a2aFV6RTG',
},
}).parsedJson(PlaybackInfoResult),
ffetch(`https://tidal.com/v1/tracks/${track.id}/lyrics`, {
query: {
...COMMON_QUERY,
},
}).parsedJson(z.object({
lyrics: z.string(),
// subtitles = timestamped lyrics
subtitles: z.string().nullable(),
})).catch(() => null),
ffetch(`https://tidal.com/v1/tracks/${track.id}/credits`, {
query: {
limit: 100,
includeContributors: true,
...COMMON_QUERY,
},
}).parsedJson(z.array(z.object({
type: z.string(),
contributors: z.array(z.object({
name: z.string(),
})),
}))),
])
const manifest = base64.decode(playbackRes.manifest)
const ext = playbackRes.audioQuality === 'HIGH' ? 'm4a' : 'flac'
const destFile = `${destination}.${ext}`
await mkdir(dirname(destFile), { recursive: true })
const lyricsLrc = lyricsRes ? lyricsRes.subtitles ?? lyricsRes.lyrics : undefined
const keyedCredits = creditsRes
? Object.fromEntries(creditsRes.map(it => [it.type, it.contributors.map(it => it.name)]))
: undefined
const params: string[] = [
'-y',
'-i',
'pipe:0',
'-c',
'copy',
'-loglevel',
'error',
'-hide_banner',
destFile,
]
const proc = $`ffmpeg ${params}`
await pipeIntoProc(proc, concatMpdSegments({
mpd: parseSimpleMpd(utf8.decoder.decode(manifest)),
fetch: async url => new Uint8Array(await ffetch(url).arrayBuffer()),
}))
await proc
if (ext === 'flac') {
await runMetaflac({
path: destFile,
tags: {
TITLE: getTrackName(track),
ALBUM: album.title,
DATE: album.releaseDate,
DISCNUMBER: track.volumeNumber,
TRACKNUMBER: track.trackNumber,
COMMENT: `ripped from tidal (id: ${track.id})`,
ARTIST: track.artists.map(it => it.name),
COPYRIGHT: track.copyright,
LYRICS: lyricsLrc,
REPLAYGAIN_ALBUM_GAIN: playbackRes.albumReplayGain,
REPLAYGAIN_ALBUM_PEAK: playbackRes.albumPeakAmplitude,
REPLAYGAIN_TRACK_GAIN: playbackRes.trackReplayGain,
REPLAYGAIN_TRACK_PEAK: playbackRes.trackPeakAmplitude,
PRODUCER: keyedCredits?.Producer,
COMPOSER: keyedCredits?.Composer,
LYRICIST: keyedCredits?.Lyricist,
PERFORMER: keyedCredits?.['Vocal accompaniment']?.map(it => `${it} (Vocal)`),
ISRC: track.isrc,
BPM: track.bpm,
},
coverPath: albumCoverPath,
})
} else {
console.log('warn: m4a tagging not yet implemented')
}
}
async function fetchPaginated<T>(params: {
initialOffset?: number
fetch: (offset: number) => Promise<{ items: T[], hasMore: boolean }>
}): Promise<T[]> {
let offset = params.initialOffset ?? 0
const items: T[] = []
while (true) {
const res = await params.fetch(offset)
for (const item of res.items) {
items.push(item)
}
if (!res.hasMore) break
offset += res.items.length
}
return items
}
async function fetchAlbumTracks(albumId: number) {
return fetchPaginated({
fetch: async (offset) => {
const res = await ffetch(`https://tidal.com/v1/albums/${albumId}/items`, { query: {
...COMMON_QUERY,
replace: true,
offset,
limit: 100,
} }).parsedJson(z.object({
items: z.array(z.object({
item: TidalTrack,
type: z.literal('track'),
})),
totalNumberOfItems: z.number(),
}))
return {
items: res.items.map(it => it.item),
hasMore: res.totalNumberOfItems > offset + res.items.length,
}
},
})
}
async function downloadTrackList(opts: {
tracks: TidalTrack[]
albums: Map<number, TidalAlbum>
albumCoverPaths: Map<number, string>
destination: string
includeTrackNumber?: boolean
onDownloadStart?: (track: TidalTrack) => void
onDownloadEnd?: (track: TidalTrack, error: Error | null) => void
}) {
await mkdir(opts.destination, { recursive: true })
const isMultiDisc = opts.tracks.some(it => it.volumeNumber !== 1)
const firstTrackArtistString = getTrackArtistString(opts.tracks[0])
const isDifferentArtists = opts.tracks.some(it => getTrackArtistString(it) !== firstTrackArtistString)
const retries = new Map<number, number>()
const queue = new AsyncQueue(opts.tracks)
let finished = 0
await asyncPool(queue, async (track, idx) => {
let filename = ''
if (opts.includeTrackNumber) {
if (isMultiDisc) {
filename = `${track.volumeNumber}-`
}
filename += `${track.trackNumber.toString().padStart(2, '0')}. `
}
if (isDifferentArtists) {
filename += `${getTrackArtistString(track)} - `
}
filename += `${getTrackName(track)}`
const filenamePath = join(opts.destination, sanitizeFilename(filename))
try {
opts.onDownloadStart?.(track)
await downloadTrack({
track,
album: opts.albums.get(track.album.id)!,
albumCoverPath: opts.albumCoverPaths.get(track.album.id)!,
destination: filenamePath,
})
opts.onDownloadEnd?.(track, null)
} catch (e) {
opts.onDownloadEnd?.(track, unknownToError(e))
const n = retries.get(track.id) ?? 0
if (n < 3) {
retries.set(track.id, n + 1)
queue.enqueue(track)
return
}
}
finished += 1
if (finished === opts.tracks.length) {
queue.end()
}
}, { limit: 8 })
}
async function downloadAlbum(album: TidalAlbum | number) {
const [albumRes, albumTracks] = await Promise.all([
typeof album === 'number'
? ffetch(`https://tidal.com/v1/albums/${album}`, { query: COMMON_QUERY }).parsedJson(TidalAlbum)
: Promise.resolve(album),
fetchAlbumTracks(typeof album === 'number' ? album : album.id),
])
console.log(`downloading album ${albumRes.title} with ${albumTracks.length} tracks`)
const outDir = join('assets/tidal-dl', sanitizeFilename(`${getTrackArtistString(albumRes)} - ${albumRes.title}`))
await mkdir(outDir, { recursive: true })
const albumCoverRes = await ffetch(getAlbumCoverUrl(albumRes.cover)).arrayBuffer()
await writeFile(join(outDir, 'cover.jpg'), new Uint8Array(albumCoverRes))
const spinnies = new Spinnies()
spinnies.add('download', { text: 'downloading album...' })
const errors = new Map<number, Error>()
await downloadTrackList({
tracks: albumTracks,
albums: new Map([[albumRes.id, albumRes]]),
albumCoverPaths: new Map([[albumRes.id, join(outDir, 'cover.jpg')]]),
destination: outDir,
includeTrackNumber: true,
onDownloadStart(track) {
spinnies.add(`${track.id}`, { text: getTrackName(track) })
errors.delete(track.id)
},
onDownloadEnd(track, error) {
spinnies.remove(`${track.id}`)
if (error) {
errors.set(track.id, error)
}
},
})
spinnies.succeed('download', { text: 'downloaded album' })
if (errors.size) {
console.error('errors:')
for (const [id, error] of errors) {
console.error(` ${id}: ${error.message}`)
}
}
}
const url = process.argv[2] ?? await question('url or search > ')
/* eslint-disable no-cond-assign */
let m
if ((m = url.match(/\/track\/(\d+)/))) {
const track = await ffetch(`https://tidal.com/v1/tracks/${m[1]}`, { query: COMMON_QUERY })
.parsedJson(TidalTrack)
const [albumRes, albumCoverRes] = await Promise.all([
ffetch(`https://tidal.com/v1/albums/${track.album.id}`, { query: COMMON_QUERY }).parsedJson(TidalAlbum),
ffetch(getAlbumCoverUrl(track.album.cover)).arrayBuffer(),
])
const tmpAlbumCoverPath = join(`assets/tidal-${track.album.cover}.jpg`)
await writeFile(tmpAlbumCoverPath, new Uint8Array(albumCoverRes))
await downloadTrack({
track,
album: albumRes,
albumCoverPath: tmpAlbumCoverPath,
destination: join('assets/tidal-dl', sanitizeFilename(`${getTrackArtistString(track)} - ${getTrackName(track)}`)),
})
await rm(tmpAlbumCoverPath)
} else if ((m = url.match(/\/album\/(\d+)/))) {
// the regex capture is a string; downloadAlbum distinguishes ids from
// album objects via typeof === 'number'
await downloadAlbum(Number(m[1]))
} else if ((m = url.match(/\/artist\/(\d+)/))) {
const withAppearsOn = (await question('include appears on albums? (y/N) > ')).toLowerCase() === 'y'
function fetchAlbumList(type: string): Promise<TidalAlbum[]> {
return fetchPaginated({
fetch: async (offset) => {
const r = await ffetch(`https://tidal.com/v2/artist/${type}/view-all`, {
query: {
itemId: m[1],
...COMMON_QUERY,
platform: 'WEB',
limit: 50,
offset,
},
headers: {
'x-tidal-client-version': '2025.10.29',
},
}).parsedJson(z.object({
items: z.array(z.object({
type: z.literal('ALBUM'),
data: TidalAlbum,
})),
}))
return {
items: r.items.map(it => it.data),
hasMore: r.items.length === 50,
}
},
})
}
const [albums, singles, appearsOn] = await Promise.all([
fetchAlbumList('ARTIST_ALBUMS'),
fetchAlbumList('ARTIST_TOP_SINGLES'),
withAppearsOn ? fetchAlbumList('ARTIST_APPEARS_ON') : Promise.resolve([]),
])
// concat and dedupe
const seenIds = new Set<number>()
const allAlbums: TidalAlbum[] = []
for (const album of [...albums, ...singles, ...appearsOn]) {
if (seenIds.has(album.id)) continue
seenIds.add(album.id)
allAlbums.push(album)
}
console.log('found %d albums', allAlbums.length)
for (const album of allAlbums) {
await downloadAlbum(album)
}
} else {
todo('unsupported url')
}


@@ -0,0 +1,17 @@
import { asyncPool } from '@fuman/utils'
import { argv, question } from 'zx'
import { counterIter } from '../../utils/counter.ts'
// zx's argv is minimist-parsed: positional args live in argv._
const url = argv._[0] || await question('url > ')
const count = Number(argv._[1] || await question('count > '))
const counter = counterIter(0, count)
await asyncPool(counter.iter, async (i) => {
if (i % 100 === 0) {
console.log('currently at %d', i)
}
await fetch(url)
})
console.log('nakrutka done')
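
counterIter from utils/counter.ts is not in the diff; its contract is clear from usage here and in the shikimori scripts below: an iterable of increasing integers (optionally bounded) plus an end() that stops iteration early, so an asyncPool consuming it can be shut down from inside a worker. A sketch:

// hypothetical reconstruction of counterIter from utils/counter.ts:
// yields start, start+1, ... up to max (inclusive), until end() is called
export function counterIter(start = 0, max = Number.POSITIVE_INFINITY) {
  let current = start
  let ended = false
  const iter: IterableIterator<number> = {
    [Symbol.iterator]() { return this },
    next() {
      if (ended || current > max) return { done: true, value: undefined }
      return { done: false, value: current++ }
    },
  }
  return { iter, end: () => { ended = true } }
}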

1
scripts/misc/shikimori/.gitignore vendored Normal file

@@ -0,0 +1 @@
/_very-secret-ratelimit-bypass.ts


@@ -0,0 +1,53 @@
import { asyncPool } from '@fuman/utils'
import Database from 'better-sqlite3'
import { counterIter, ffetchShiki } from './utils.ts'
const isManga = process.argv[2] === 'manga'
const isRanobe = process.argv[2] === 'ranobe'
const collection = isManga ? 'mangas' : isRanobe ? 'ranobe' : 'animes'
const db = new Database('assets/shikimori.db')
db.exec(`
create table if not exists ${collection} (
id integer primary key,
data text not null
);
create table if not exists ${collection}_related (
id integer primary key,
data text not null
);
`)
const insertQuery = db.prepare(`insert into ${collection} (id, data) values (?, ?) on conflict (id) do update set data = excluded.data`)
const insertRelatedQuery = db.prepare(`insert into ${collection}_related (id, data) values (?, ?) on conflict (id) do update set data = excluded.data`)
const maxId = await ffetchShiki(`/api/${collection}?order=id_desc`).json<any>().then(res => res[0].id)
console.log('max id: %d', maxId)
const counter = counterIter(1, maxId)
await asyncPool(counter.iter, async (id) => {
if (id % 1000 === 0) {
console.log('currently at %d', id)
}
// const data = await ffetchShiki(`/api/${collection}/${id}`, {
// validateResponse: false,
// }).json<any>()
// if (data.code === 404) {
// return
// }
// insertQuery.run(id, JSON.stringify(data))
const data = await ffetchShiki(`/api/${collection}/${id}/related`, {
validateResponse: false,
}).json<any>()
if (data.code === 404) {
return
}
insertRelatedQuery.run(id, JSON.stringify(data))
}, { limit: 64 })
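
The ./utils.ts these shikimori scripts import is not included (and its sibling _very-secret-ratelimit-bypass.ts is explicitly gitignored above). It presumably re-exports counterIter and an ffetch instance pinned to shikimori, in the same ffetchBase.extend style the soundcloud and tidal scripts use; the base url and rate-limit handling below are assumptions:

// hypothetical reconstruction of scripts/misc/shikimori/utils.ts
import { ffetchAddons } from '@fuman/fetch'
import { ffetch as ffetchBase } from '../../../utils/fetch.ts'

export { counterIter } from '../../../utils/counter.ts'

export const ffetchShiki = ffetchBase.extend({
  baseUrl: 'https://shikimori.one',
  addons: [ffetchAddons.rateLimitHandler()],
})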


@@ -0,0 +1,30 @@
import { asyncPool } from '@fuman/utils'
import Database from 'better-sqlite3'
import { counterIter, ffetchShiki } from './utils.ts'
const db = new Database('assets/shikimori.db')
db.exec(`
create table if not exists bans (
id integer primary key,
data text not null
);
`)
const insertQuery = db.prepare('insert into bans (id, data) values (?, ?) on conflict (id) do update set data = excluded.data')
const counter = counterIter(1)
await asyncPool(counter.iter, async (page) => {
if (page % 100 === 0) {
console.log('currently at page %d', page)
}
const data = await ffetchShiki(`/api/bans?page=${page}`).json<any>()
if (!data.length) {
counter.end()
return
}
for (const ban of data) {
insertQuery.run(ban.id, JSON.stringify(ban))
}
}, { limit: 64 })


@@ -0,0 +1,59 @@
import { asyncPool } from '@fuman/utils'
import Database from 'better-sqlite3'
import { counterIter, ffetchShiki } from './utils.ts'
const db = new Database('assets/shikimori.db')
db.pragma('journal_mode = WAL')
db.exec(`
create table if not exists characters (
id integer primary key,
data text not null
);
`)
const insertQuery = db.prepare('insert into characters (id, data) values (?, ?) on conflict (id) do update set data = excluded.data')
// find maxId with binary search
let maxIdPage = 20000
let maxIdPageStart = 1
let maxId = 0
while (true) {
const midPage = Math.floor((maxIdPageStart + maxIdPage) / 2)
console.log('trying page %d', midPage)
const res = await ffetchShiki.post('/api/graphql', {
json: {
query: `{characters(page: ${midPage}, limit: 50) { id }}`,
},
}).json<any>()
const items = res.data.characters
if (!items.length) {
maxIdPage = midPage - 1
continue
}
if (maxIdPageStart === midPage) {
maxId = Math.max(...items.map(item => item.id))
break
} else {
maxIdPageStart = midPage
}
}
console.log('max id: %d', maxId)
const counter = counterIter(1, maxId)
await asyncPool(counter.iter, async (id) => {
if (id % 1000 === 0) {
console.log('currently at %d', id)
}
const data = await ffetchShiki(`/api/characters/${id}`, {
validateResponse: false,
}).json<any>()
if (data.code === 404) {
return
}
insertQuery.run(id, JSON.stringify(data))
}, { limit: 64 })


@@ -0,0 +1,49 @@
import { asyncPool } from '@fuman/utils'
import Database from 'better-sqlite3'
import { counterIter, ffetchShiki } from './utils.ts'
const db = new Database('assets/shikimori.db')
db.pragma('journal_mode = WAL')
db.exec(`
create table if not exists clubs (
id integer primary key,
data text not null
);
`)
const insertQuery = db.prepare('insert into clubs (id, data) values (?, ?) on conflict (id) do update set data = excluded.data')
// collect clubs ids
const ids: Set<number> = new Set()
const pageCounter = counterIter(1)
await asyncPool(pageCounter.iter, async (page) => {
const data = await ffetchShiki('/api/clubs', {
query: { page, limit: 50 },
validateResponse: false,
}).json<any>()
if (!data.length) {
pageCounter.end()
return
}
for (const club of data) {
ids.add(club.id)
}
}, { limit: 16 })
console.log('collected %d clubs', ids.size)
await asyncPool(ids, async (id, idx) => {
if (idx % 100 === 0) {
console.log('currently at %d', idx)
}
const clubData = await ffetchShiki(`/api/clubs/${id}`).json<any>()
if (clubData.code === 404) {
return
}
insertQuery.run(id, JSON.stringify(clubData))
}, { limit: 64 })


@@ -0,0 +1,37 @@
import { asyncPool } from '@fuman/utils'
import Database from 'better-sqlite3'
import { counterIter, ffetchShiki } from './utils.ts'
const db = new Database('assets/shikimori.db')
db.pragma('journal_mode = WAL')
db.exec(`
create table if not exists comments (
id integer primary key,
data text not null
);
`)
const insertQuery = db.prepare('insert into comments (id, data) values (?, ?) on conflict (id) do update set data = excluded.data')
const counter = counterIter(11312000)
let consequent404 = 0
await asyncPool(counter.iter, async (id) => {
if (id % 1000 === 0) {
console.log('currently at %d', id)
}
const data = await ffetchShiki(`/api/comments/${id}`, {
validateResponse: false,
}).json<any>()
if (data.code === 404) {
consecutive404++
if (consecutive404 > 10_000) {
counter.end()
console.log('10k consecutive 404s, stopping')
}
return
}
consecutive404 = 0
insertQuery.run(id, JSON.stringify(data))
}, { limit: 64 })


@ -0,0 +1,59 @@
import { asyncPool } from '@fuman/utils'
import Database from 'better-sqlite3'
import { counterIter, ffetchShiki } from './utils.ts'
const db = new Database('assets/shikimori.db')
db.pragma('journal_mode = WAL')
db.exec(`
create table if not exists people (
id integer primary key,
data text not null
);
`)
const insertQuery = db.prepare('insert into people (id, data) values (?, ?) on conflict (id) do update set data = excluded.data')
// find maxId with binary search
let maxIdPage = 20000
let maxIdPageStart = 1
let maxId = 0
while (true) {
// ceil, not floor: with floor the loop can settle on maxIdPageStart without ever probing the last page
const midPage = Math.ceil((maxIdPageStart + maxIdPage) / 2)
console.log('trying page %d', midPage)
const res = await ffetchShiki.post('/api/graphql', {
json: {
query: `{people(page: ${midPage}, limit: 50) { id }}`,
},
}).json<any>()
const items = res.data.people
if (!items.length) {
maxIdPage = midPage - 1
continue
}
if (maxIdPageStart === midPage) {
maxId = Math.max(...items.map(item => item.id))
break
} else {
maxIdPageStart = midPage
}
}
console.log('max id: %d', maxId)
const counter = counterIter(1, maxId)
await asyncPool(counter.iter, async (id) => {
if (id % 1000 === 0) {
console.log('currently at %d', id)
}
const data = await ffetchShiki(`/api/people/${id}`, {
validateResponse: false,
}).json<any>()
if (data.code === 404) {
return
}
insertQuery.run(id, JSON.stringify(data))
}, { limit: 64 })


@ -0,0 +1,129 @@
import { asyncPool } from '@fuman/utils'
import Database from 'better-sqlite3'
import { counterIter, ffetchShiki } from './utils.ts'
const db = new Database('assets/shikimori.db')
db.pragma('journal_mode = WAL')
db.exec(`
create table if not exists users (
id integer primary key,
data text not null
);
`)
const insertQuery = db.prepare('insert into users (id, data) values (?, ?) on conflict (id) do update set data = excluded.data')
async function fetchUserFriends(userId: number) {
const list: any[] = []
for (let page = 1; ; page++) {
const data = await ffetchShiki(`/api/users/${userId}/friends`, {
query: { page, limit: 100 },
validateResponse: false,
}).json<any>()
if (!data.length) {
break
}
list.push(...data)
}
return list
}
async function fetchUserRates(userId: number, kind: 'anime' | 'manga') {
const list: any[] = []
for (let page = 1; ; page++) {
const data = await ffetchShiki(`/api/users/${userId}/${kind}_rates`, {
query: { page, limit: 1000 },
validateResponse: false,
}).json<any>()
if (data === null || !data.length) {
break
}
for (const item of data) {
// clean up unnecessary data before inserting
delete item.user
if (item[kind]) {
item[`${kind}_id`] = item[kind].id
delete item[kind]
}
list.push(item)
}
}
return list
}
async function fetchUserHistory(userId: number) {
const list: any[] = []
for (let page = 0; ; page++) {
const data = await ffetchShiki(`/api/users/${userId}/history`, {
query: { page, limit: 100 },
validateResponse: false,
}).json<any>()
if (!data.length) {
break
}
for (const item of data) {
if (item.target) {
item.target_type = item.target.url.startsWith('/animes/') ? 'anime' : 'manga'
item.target_id = item.target.id
delete item.target
}
list.push(item)
}
}
return list
}
const counter = counterIter(467800)
let consecutive404 = 0
await asyncPool(counter.iter, async (id) => {
if (id % 100 === 0) {
console.log('currently at %d', id)
}
const data = await ffetchShiki(`/api/users/${id}`, {
validateResponse: false,
}).json<any>()
if (data.code === 404) {
consecutive404++
if (consecutive404 > 1_000) {
counter.end()
console.log('1k consecutive 404s, stopping')
}
return
}
consecutive404 = 0
// fetch extra data
const [
favsData,
friends,
animeRates,
mangaRates,
history,
] = await Promise.all([
ffetchShiki(`/api/users/${id}/favourites`).json<any>(),
fetchUserFriends(id),
fetchUserRates(id, 'anime'),
fetchUserRates(id, 'manga'),
fetchUserHistory(id),
])
data._extra = {
favs: favsData,
friends,
animeRates,
mangaRates,
history,
}
insertQuery.run(id, JSON.stringify(data))
}, { limit: 32 })


@ -0,0 +1,15 @@
import { ffetch as ffetchBase } from '../../../utils/fetch.ts'
import { rateLimitBypass } from './_very-secret-ratelimit-bypass.ts'
export const ffetchShiki = ffetchBase.extend({
baseUrl: 'https://shikimori.one',
headers: {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
'Accept-Language': 'en-US,en;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
},
retry: {},
...(rateLimitBypass as any),
})
export { counterIter } from '../../../utils/counter.ts'


@ -0,0 +1,223 @@
#!/usr/bin/env tsx
import { ffetch } from '../../utils/fetch.ts'
import { getEnv } from '../../utils/misc.ts'
// context: had a discussion in a group chat about which day of the week albums are usually released on, needed a way to find out
// the script is mostly vibe-coded but i have no intention of running it more than once so who cares
interface SpotifyTrack {
track: {
id: string
name: string
album: {
id: string
name: string
release_date: string
release_date_precision: 'year' | 'month' | 'day'
}
artists: Array<{
name: string
}>
}
}
interface SpotifyAlbum {
id: string
name: string
release_date: string
release_date_precision: 'year' | 'month' | 'day'
artists: Array<{
name: string
}>
}
interface SpotifyResponse<T> {
items: T[]
next: string | null
total: number
}
class SpotifyClient {
private accessToken: string
private baseUrl = 'https://api.spotify.com/v1'
constructor(accessToken: string) {
this.accessToken = accessToken
}
private async makeRequest<T>(endpoint: string): Promise<T> {
const response = await ffetch(endpoint, {
baseUrl: this.baseUrl,
headers: {
'Authorization': `Bearer ${this.accessToken}`,
'Content-Type': 'application/json',
},
})
if (!response.ok) {
throw new Error(`Spotify API error: ${response.status} ${response.statusText}: ${await response.text()}`)
}
return response.json()
}
async getLikedTracks(): Promise<SpotifyTrack[]> {
const allTracks: SpotifyTrack[] = []
let url = '/me/tracks?limit=50'
while (url) {
const response = await this.makeRequest<SpotifyResponse<SpotifyTrack>>(url)
allTracks.push(...response.items)
console.log(`Fetched ${allTracks.length} out of ${response.total} tracks`)
url = response.next ? response.next.replace(this.baseUrl, '') : ''
}
return allTracks
}
async getAlbum(albumId: string): Promise<SpotifyAlbum> {
return this.makeRequest<SpotifyAlbum>(`/albums/${albumId}`)
}
}
interface DayStats {
[key: string]: {
count: number
albums: Array<{
name: string
artist: string
releaseDate: string
}>
}
}
function getDayOfWeek(dateString: string, precision: string): string {
if (precision === 'year') {
return 'Unknown (Year only)'
}
if (precision === 'month') {
return 'Unknown (Month only)'
}
try {
const date = new Date(dateString)
if (Number.isNaN(date.getTime())) {
return 'Unknown (Invalid date)'
}
const days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
return days[date.getDay()]
} catch (error) {
return 'Unknown (Parse error)'
}
}
async function main() {
const accessToken = getEnv('SPOTIFY_API_TOKEN')
if (!accessToken) {
console.error('Error: SPOTIFY_API_TOKEN environment variable is required')
process.exit(1)
}
console.log('🎵 Fetching your liked tracks from Spotify...')
const spotify = new SpotifyClient(accessToken)
try {
const likedTracks = await spotify.getLikedTracks()
console.log(`Found ${likedTracks.length} liked tracks`)
const processedAlbums = new Set<string>()
const dayStats: DayStats = {}
// First, count unique albums from tracks
const uniqueAlbumIds = new Set<string>()
for (const track of likedTracks) {
uniqueAlbumIds.add(track.track.album.id)
}
console.log(`📊 Analyzing ${uniqueAlbumIds.size} unique album release dates...`)
let processedCount = 0
let skippedCount = 0
for (const track of likedTracks) {
const albumId = track.track.album.id
// Skip if we've already processed this album
if (processedAlbums.has(albumId)) {
skippedCount++
continue
}
processedAlbums.add(albumId)
processedCount++
try {
// Get detailed album info
const album = await spotify.getAlbum(albumId)
const dayOfWeek = getDayOfWeek(album.release_date, album.release_date_precision)
if (!dayStats[dayOfWeek]) {
dayStats[dayOfWeek] = {
count: 0,
albums: [],
}
}
dayStats[dayOfWeek].count++
dayStats[dayOfWeek].albums.push({
name: album.name,
artist: album.artists.map(a => a.name).join(', '),
releaseDate: album.release_date,
})
// Progress reporting
if (processedCount % 10 === 0 || processedCount === uniqueAlbumIds.size) {
console.log(`Progress: ${processedCount}/${uniqueAlbumIds.size} albums processed (${skippedCount} skipped)`)
}
// Add a small delay to avoid rate limiting
await new Promise(resolve => setTimeout(resolve, 100))
} catch (error) {
console.warn(`Failed to fetch album info for ${track.track.album.name}: ${error}`)
}
}
console.log('\n📈 Album Release Day Statistics')
console.log('='.repeat(50))
// Sort by count (descending)
const sortedStats = Object.entries(dayStats)
.sort(([,a], [,b]) => b.count - a.count)
for (const [day, stats] of sortedStats) {
console.log(`\n${day}: ${stats.count} albums`)
console.log('-'.repeat(30))
// Show top 5 albums for this day
const topAlbums = stats.albums.slice(0, 5)
for (const album of topAlbums) {
console.log(`${album.name} by ${album.artist} (${album.releaseDate})`)
}
if (stats.albums.length > 5) {
console.log(` ... and ${stats.albums.length - 5} more`)
}
}
console.log('\n📊 Summary:')
console.log(`Total unique albums found: ${uniqueAlbumIds.size}`)
console.log(`Total unique albums analyzed: ${processedAlbums.size}`)
console.log(`Albums skipped (duplicates): ${skippedCount}`)
console.log(`Total liked tracks: ${likedTracks.length}`)
} catch (error) {
console.error('Error:', error)
process.exit(1)
}
}
await main()


@ -0,0 +1,56 @@
import { readFile } from 'node:fs/promises'
import { join } from 'node:path'
import plist from 'plist'
import { z } from 'zod'
import { $, sleep } from 'zx'
import { ffetch } from '../../utils/fetch.ts'
const latestVerInfo = await ffetch('https://api.github.com/repos/forkgram/tdesktop/releases/latest').parsedJson(
z.object({
tag_name: z.string().transform(v => v.replace(/^v/, '')),
assets: z.array(z.object({
name: z.string(),
browser_download_url: z.string(),
})),
}),
)
const INSTALL_PATH = '/Applications/Forkgram.app'
console.log('latest version:', latestVerInfo.tag_name)
const installedPlist = await readFile(join(INSTALL_PATH, 'Contents/Info.plist'), 'utf8')
const installedPlistParsed = z.object({
CFBundleShortVersionString: z.string(),
}).parse(plist.parse(installedPlist))
console.log('installed version:', installedPlistParsed.CFBundleShortVersionString)
if (installedPlistParsed.CFBundleShortVersionString === latestVerInfo.tag_name) {
console.log('✅ no update needed')
process.exit(0)
}
const arm64Asset = latestVerInfo.assets.find(asset => asset.name === 'Forkgram.macOS.no.auto-update_arm64.zip')
if (!arm64Asset) {
console.error('❌ no arm64 asset found')
process.exit(1)
}
console.log('installing new version...')
await $`curl -L ${arm64Asset.browser_download_url} -o /tmp/forkgram.zip`
await $`unzip -o /tmp/forkgram.zip -d /tmp/forkgram`
const pid = await $`/usr/bin/pgrep -f /Applications/Forkgram.app/Contents/MacOS/Telegram`.text().catch(() => null)
if (pid) {
await $`kill -9 ${pid.trim()}`
}
await $`rm -rf ${INSTALL_PATH}`
await $`mv /tmp/forkgram/Telegram.app ${INSTALL_PATH}`
await $`rm -rf /tmp/forkgram`
await $`xattr -cr ${INSTALL_PATH}`
await sleep(1000)
await $`open ${INSTALL_PATH}`
console.log('✅ done')


@ -0,0 +1,87 @@
import { randomBytes } from 'node:crypto'
import { faker } from '@faker-js/faker'
import { question } from 'zx'
import { ffetch } from '../../utils/fetch.ts'
// log in with your yandex account in the browser, then go to music.yandex.ru and open devtools
// find the long-ass string in the "Cookie" header of requests to music.yandex.ru; it must contain the "Session_id" cookie
// make sure to copy it completely (on firefox this requires toggling "Raw")
// looks something like: is_gdpr=0; is_gdpr=0; is_gdpr_b=COnCMBCR0wIoAg==; _yasc=ctfv6IPUcb+Lk+jqYr0thW1STKmQC5yB4IJUM5Gn....
const cookies = await question('music.yandex.ru cookies > ')
const parsed = new Map(cookies.split('; ').map((cookie) => {
const [name, value] = cookie.split('=')
return [name, value]
}))
if (!parsed.has('Session_id')) {
throw new Error('Session_id cookie not found')
}
const deviceId = randomBytes(16).toString('hex')
const uuid = randomBytes(16).toString('hex')
const genRequestId = () => `${uuid}${Math.floor(Date.now())}`
const query = {
manufacturer: 'Google',
model: 'Pixel 9 Pro XL',
app_platform: 'Android 16 (REL)',
am_version_name: '7.46.0(746003972)',
app_id: 'ru.yandex.music',
app_version_name: '2025.09.2 #114gpr',
am_app: 'ru.yandex.music 2025.09.2 #114gpr',
deviceid: deviceId,
device_id: deviceId,
uuid,
}
const res = await ffetch('https://mobileproxy.passport.yandex.net/1/bundle/oauth/token_by_sessionid', {
query: {
...query,
request_id: genRequestId(),
},
form: {
client_id: 'c0ebe342af7d48fbbbfcf2d2eedb8f9e',
client_secret: 'ad0a908f0aa341a182a37ecd75bc319e',
grant_type: 'sessionid',
host: 'yandex.ru',
},
headers: {
'Accept': '*/*',
'User-Agent': 'com.yandex.mobile.auth.sdk/7.46.0.746003972 (Google Pixel 9 Pro XL; Android 16) PassportSDK/7.46.0.746003972',
'Accept-Language': 'en-RU;q=1, ru-RU;q=0.9',
'Ya-Client-Host': 'passport.yandex.ru',
'Ya-Client-Cookie': cookies,
},
}).json() as any
if (res.status !== 'ok') {
console.error('Unexpected response:', res)
process.exit(1)
}
console.log('res', res)
const res2 = await ffetch('https://mobileproxy.passport.yandex.net/1/token', {
query: {
...query,
request_id: genRequestId(),
},
form: {
access_token: res.access_token,
client_id: '23cabbbdc6cd418abb4b39c32c41195d',
client_secret: '53bc75238f0c4d08a118e51fe9203300',
grant_type: 'x-token',
},
}).json() as any
if (!res2.access_token) {
console.error('Unexpected response:', res2)
process.exit(1)
}
console.log('res2', res2)
console.log('')
console.log('Your auth token is:')
console.log(res2.access_token)
console.log('Expires at:', new Date(Date.now() + res.expires_in * 1000).toLocaleString('ru-RU'))

tsconfig.json Normal file

@ -0,0 +1,26 @@
{
"compilerOptions": {
"target": "ESNext",
"lib": ["ESNext", "DOM"],
"moduleDetection": "force",
"module": "ESNext",
// Bundler mode
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"allowJs": true,
// Best practices
"strict": true,
"noFallthroughCasesInSwitch": true,
"noImplicitAny": false,
"noPropertyAccessFromIndexSignature": false,
// Some stricter flags (disabled by default)
"noUnusedLocals": false,
"noUnusedParameters": false,
"noEmit": true,
"verbatimModuleSyntax": true,
"skipLibCheck": true
}
}

utils/counter.ts Normal file

@ -0,0 +1,25 @@
export function counterIter(start = 0, end = Infinity) {
let i = start
let ended = false
const iter: IterableIterator<number> = {
[Symbol.iterator]: () => iter,
next() {
if (ended) {
return { value: undefined, done: true }
}
if (i > end) {
return { value: undefined, done: true }
}
return { value: i++, done: false }
},
}
return {
iter,
end: () => {
ended = true
},
}
}
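
usage sketch: counterIter pairs with asyncPool the same way the scrapers above use it (the endpoint here is made up):

import { asyncPool } from '@fuman/utils'
import { counterIter } from './counter.ts'

// hand out page numbers 1, 2, 3, ... until some worker calls .end()
const pages = counterIter(1)
await asyncPool(pages.iter, async (page) => {
  const items = await fetch(`https://example.com/api/items?page=${page}`).then(r => r.json())
  if (!items.length) {
    pages.end() // stop producing new pages; in-flight workers still finish
    return
  }
  // ... process items
}, { limit: 8 })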

utils/csv.ts Normal file

@ -0,0 +1,147 @@
import { FramedReader, type IReadable, TextDelimiterCodec } from '@fuman/io'
interface CsvReaderOptions {
/** @default '\n' */
lineDelimiter: string
/** @default ',' */
delimiter: string
/** @default '"' */
quote: string
/** @default '"' */
quoteEscape: string
/**
* if true, missing values in a line will be treated as empty strings
* @default false
*/
assumeEmptyValues: boolean
/** whether the input has no header row, i.e. every line is data (requires `schema` to be set) */
skipHeader: boolean
}
export class CsvReader<const Fields extends string[] = string[]> {
#codec: FramedReader<string>
readonly options: CsvReaderOptions
#schema?: Fields
constructor(
stream: IReadable,
options: Partial<CsvReaderOptions> & {
/** fields that are expected in the csv */
schema?: Fields
} = {},
) {
this.options = {
lineDelimiter: '\n',
delimiter: ',',
quote: '"',
quoteEscape: '"',
assumeEmptyValues: false,
skipHeader: false,
...options,
}
this.#codec = new FramedReader(stream, new TextDelimiterCodec(this.options.lineDelimiter))
this.#schema = options.schema
if (options.skipHeader) {
if (!options.schema) throw new Error('schema is required if skipHeader is true')
this.#header = options.schema
}
}
#header?: string[]
async read(): Promise<Record<Fields[number], string> | null> {
let line = await this.#codec.read()
if (!line) return null
line = line.trim()
if (line === '') return this.read()
if (!this.#header) {
this.#header = line.split(this.options.delimiter).map(s => s.trim())
if (this.#schema && JSON.stringify(this.#schema) !== JSON.stringify(this.#header)) {
throw new Error(`schema and header do not match (expected ${this.#schema.join(', ')}; got ${this.#header.join(', ')})`)
}
return this.read()
}
const obj: Record<string, string> = {}
let insideQuote = false
let currentFieldIdx = 0
let currentValue = ''
for (let i = 0; i < line.length; i++) {
if (line[i] === this.options.quoteEscape) {
if (insideQuote && line[i + 1] === this.options.quote) {
i++
currentValue += this.options.quote
continue
}
}
if (line[i] === this.options.quote) {
if (!insideQuote) {
if (currentValue !== '') {
throw new Error('unexpected open quote mid-value')
}
insideQuote = true
continue
}
if (i !== line.length - 1 && line[i + 1] !== this.options.delimiter) {
throw new Error(`unexpected close quote mid-value at ${i}`)
}
insideQuote = false
continue
}
if (insideQuote) {
currentValue += line[i]
continue
}
if (line[i] === this.options.delimiter) {
obj[this.#header[currentFieldIdx]] = currentValue
currentFieldIdx += 1
currentValue = ''
if (currentFieldIdx >= this.#header.length) {
throw new Error('too many fields')
}
continue
}
currentValue += line[i]
}
obj[this.#header[currentFieldIdx++]] = currentValue
if (currentFieldIdx < this.#header.length) {
if (this.options.assumeEmptyValues) {
for (let i = currentFieldIdx; i < this.#header.length; i++) {
obj[this.#header[i]] = ''
}
} else {
throw new Error(`missing values for fields: ${this.#header.slice(currentFieldIdx).join(', ')}`)
}
}
return obj as Record<Fields[number], string>
}
[Symbol.asyncIterator]() {
const iter: AsyncIterableIterator<Record<Fields[number], string>> = {
next: async () => {
const obj = await this.read()
if (!obj) return { done: true, value: undefined }
return { done: false, value: obj }
},
[Symbol.asyncIterator]: () => iter,
}
return iter
}
}
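
a minimal usage sketch, assuming the csv comes from a web ReadableStream (webReadableToFuman is the same adapter utils/fetch.ts uses; the url is made up):

import { webReadableToFuman } from '@fuman/io'
import { CsvReader } from './csv.ts'

const res = await fetch('https://example.com/export.csv')
const reader = new CsvReader(webReadableToFuman(res.body!), {
  // rows get typed as Record<'id' | 'name' | 'email', string>
  schema: ['id', 'name', 'email'],
})
for await (const row of reader) {
  console.log(row.id, row.name)
}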

utils/currency.ts Normal file

@ -0,0 +1,113 @@
import { asyncPool } from '@fuman/utils'
import { z } from 'zod'
import { ffetch } from './fetch.ts'
import { getEnv } from './misc.ts'
// token management
const TOKENS = getEnv('OXR_TOKENS').split(',')
// api token => requests remaining
const usageAvailable = new Map<string, number>()
function getToken() {
// find token with the most requests remaining
const token = TOKENS.find(t => usageAvailable.get(t)! > 0)
if (!token) throw new Error('no tokens available')
// consume 1 request
usageAvailable.set(token, usageAvailable.get(token)! - 1)
return token
}
// base => other => value
// NB: ideally we should have expiration and persistence on this
const data = new Map<string, Record<string, number>>()
async function fetchMissingPairs(list: { from: string, to: string }[]) {
const missing = list.filter(c => !data.has(c.from) && !data.has(c.to) && c.from !== c.to)
if (missing.length === 0) return
const basesToFetch = new Set<string>()
for (const { from, to } of missing) {
if (!basesToFetch.has(from) && !basesToFetch.has(to)) {
basesToFetch.add(from)
}
}
if (!usageAvailable.size) {
// NB: ideally we should lock here for a production-ready implementation
// fetch usage for all tokens
await asyncPool(TOKENS, async (token) => {
const res = await ffetch('https://openexchangerates.org/api/usage.json', {
query: {
app_id: token,
},
}).parsedJson(z.object({
status: z.literal(200),
data: z.object({
app_id: z.string(),
status: z.literal('active'),
usage: z.object({
requests_remaining: z.number(),
}),
}),
}))
usageAvailable.set(token, res.data.usage.requests_remaining)
}, { onError: () => 'ignore' })
if (!usageAvailable.size) {
throw new Error('failed to fetch usage, are all tokens dead?')
}
}
// console.log('will fetch bases:', [...basesToFetch])
await asyncPool(basesToFetch, async (base) => {
const res = await ffetch('https://openexchangerates.org/api/latest.json', {
query: {
app_id: getToken(),
base, // nb: latest.json defaults to usd base, but we cache rates keyed by `base`, so pass it explicitly
},
}).parsedJson(z.object({
rates: z.record(z.string(), z.number()),
}))
data.set(base, res.rates)
})
}
export async function convertCurrenciesBatch(list: { from: string, to: string, amount: number }[]) {
await fetchMissingPairs(list)
const ret: { from: string, to: string, amount: number, converted: number }[] = []
for (const { from, to, amount } of list) {
let result: number
if (from === to) {
result = amount
} else if (data.has(from)) {
const rate = data.get(from)![to]!
if (!rate) throw new Error(`rate unavailable: ${from} -> ${to}`)
result = amount * rate
// console.log('converted from', from, 'to', to, 'amount', amount, 'result', result, 'rate', rate)
} else if (data.has(to)) {
const rate = data.get(to)![from]!
if (!rate) throw new Error(`rate unavailable: ${from} -> ${to}`)
result = amount / rate
// console.log('converted rev from', from, 'to', to, 'amount', amount, 'result', result, 'rate', rate)
} else {
throw new Error(`rate unavailable: ${from} -> ${to}`)
}
ret.push({
from,
to,
amount,
converted: result,
})
}
return ret
}
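
usage sketch (expects OXR_TOKENS to be set to comma-separated openexchangerates.org app ids):

import { convertCurrenciesBatch } from './currency.ts'

const results = await convertCurrenciesBatch([
  { from: 'USD', to: 'EUR', amount: 100 },
  { from: 'JPY', to: 'USD', amount: 5000 },
])
for (const r of results) {
  console.log(`${r.amount} ${r.from} = ${r.converted} ${r.to}`)
}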

utils/fetch.ts Normal file

@ -0,0 +1,42 @@
import { createWriteStream } from 'node:fs'
import { type FfetchAddon, ffetchAddons, ffetchBase, type FfetchResultInternals } from '@fuman/fetch'
import { toughCookieAddon } from '@fuman/fetch/tough'
import { ffetchZodAdapter } from '@fuman/fetch/zod'
import { webReadableToFuman, write } from '@fuman/io'
import { nodeWritableToFuman } from '@fuman/node'
import { type CheerioAPI, load } from 'cheerio'
import { ProxyAgent } from 'undici'
const cheerioAddon: FfetchAddon<object, { cheerio: () => Promise<CheerioAPI> }> = {
response: {
async cheerio(this: FfetchResultInternals<object>) {
this._headers ??= {}
this._headers.Accept ??= 'text/html; charset=utf-8'
return load(await this.text())
},
},
}
export const ffetch = ffetchBase.extend({
addons: [
ffetchAddons.parser(ffetchZodAdapter()),
ffetchAddons.rateLimitHandler(),
cheerioAddon,
toughCookieAddon(),
],
extra: {
dispatcher: process.env.http_proxy ? new ProxyAgent(process.env.http_proxy) : undefined,
} as any,
})
export async function downloadStream(stream: ReadableStream, path: string) {
const file = nodeWritableToFuman(createWriteStream(path))
await write.pipe(file, webReadableToFuman(stream))
file.close()
}
export async function downloadFile(url: string, path: string, extra?: Parameters<typeof ffetch>[1]) {
const stream = await ffetch(url, extra).stream()
await downloadStream(stream, path)
}
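
quick sketch of the addons wired up above: cheerio for html, the zod parser for typed json (the urls are just examples):

import { z } from 'zod'
import { ffetch } from './fetch.ts'

// scrape html via the cheerio addon
const $ = await ffetch('https://example.com').cheerio()
console.log($('title').text())

// schema-validated json via the zod parser addon
const repo = await ffetch('https://api.github.com/repos/nodejs/node').parsedJson(
  z.object({ full_name: z.string(), stargazers_count: z.number() }),
)
console.log(repo.full_name, repo.stargazers_count)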

utils/fs.ts Normal file

@ -0,0 +1,39 @@
import * as fsp from 'node:fs/promises'
export async function fileExists(path: string): Promise<boolean> {
try {
const stat = await fsp.stat(path)
return stat.isFile()
} catch {
return false
}
}
export async function directoryExists(path: string): Promise<boolean> {
try {
const stat = await fsp.stat(path)
return stat.isDirectory()
} catch {
return false
}
}
export function sanitizeFilename(filename: string) {
return filename.replace(/[/\\?%*:|"<>]/g, '_')
}
export async function writeWebStreamToFile(stream: ReadableStream<unknown>, path: string) {
const fd = await fsp.open(path, 'w+')
const writer = fd.createWriteStream()
for await (const chunk of stream as any) {
writer.write(chunk)
}
writer.end()
await new Promise<void>((resolve, reject) => {
writer.on('error', reject)
writer.on('finish', resolve)
})
}

utils/media-metadata.ts Normal file

@ -0,0 +1,123 @@
import type { ProcessPromise } from 'zx'
import { Readable } from 'node:stream'
import { Bytes, write } from '@fuman/io'
import { $ } from 'zx'
export async function generateOpusImageBlob(image: Uint8Array) {
// todo we should probably not use ffprobe here but whatever lol
const proc = $`ffprobe -of json -v error -show_entries stream=codec_name,width,height pipe:0`
proc.stdin.write(image)
proc.stdin.end()
const json = await proc.json()
const img = json.streams[0]
// https://www.rfc-editor.org/rfc/rfc9639.html#section-8.8
const mime = img.codec_name === 'mjpeg' ? 'image/jpeg' : 'image/png'
const description = 'Cover Artwork'
const res = Bytes.alloc(image.length + 128)
write.uint32be(res, 3) // picture type = album cover
write.uint32be(res, mime.length)
write.rawString(res, mime)
write.uint32be(res, description.length)
write.rawString(res, description)
write.uint32be(res, img.width)
write.uint32be(res, img.height)
write.uint32be(res, 0) // color depth
write.uint32be(res, 0) // color index (unused, for gifs)
write.uint32be(res, image.length)
write.bytes(res, image)
return res.result()
}
export async function runMetaflac(options: {
path: string
tags: Partial<Record<
| 'TITLE'
| 'ARTIST'
| 'COMPOSER'
| 'ALBUM'
| 'DATE'
| 'DISCNUMBER'
| 'TRACKNUMBER'
| 'COMMENT'
| 'PRODUCER'
| 'LYRICIST'
| 'PERFORMER'
| 'COPYRIGHT'
| 'ISRC'
| 'LYRICS'
| 'MAIN_ARTIST'
| 'REPLAYGAIN_ALBUM_GAIN'
| 'REPLAYGAIN_TRACK_GAIN'
| 'REPLAYGAIN_ALBUM_PEAK'
| 'REPLAYGAIN_TRACK_PEAK'
| 'BPM',
string | number | string[] | null
>>
coverPath?: string
}) {
const params: string[] = [
'--remove-all-tags',
]
for (const [key, value] of Object.entries(options.tags)) {
if (value == null) continue
if (Array.isArray(value)) {
for (const v of value) {
params.push(`--set-tag=${key}=${v}`)
}
} else {
params.push(`--set-tag=${key}=${value}`)
}
}
if (options.coverPath) {
params.push(`--import-picture-from=${options.coverPath}`)
}
params.push(options.path)
await $`metaflac ${params}`
}
export function generateFfmpegMetadataFlags(metadata: Partial<Record<string, string | string[]>>) {
const res: string[] = []
for (const [key, value] of Object.entries(metadata)) {
if (value == null) continue
if (Array.isArray(value)) {
for (const v of value) {
res.push('-metadata', `${key}=${v}`)
}
} else {
res.push('-metadata', `${key}=${value}`)
}
}
return res
}
export async function pipeIntoProc(proc: ProcessPromise, stream: ReadableStream) {
const nodeStream = Readable.fromWeb(stream as any)
await new Promise<void>((resolve, reject) => {
nodeStream.on('error', reject)
const pipe = nodeStream.pipe(proc.stdin)
pipe.on('error', reject)
pipe.on('finish', resolve)
})
}
export async function writeIntoProc(proc: ProcessPromise, data: Uint8Array) {
return new Promise<void>((resolve, reject) => {
proc.stdin.write(data, (err) => {
if (err) {
reject(err)
} else {
proc.stdin.end()
resolve()
}
})
})
}
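
usage sketch for runMetaflac (the paths are made up; needs the metaflac cli installed):

import { runMetaflac } from './media-metadata.ts'

await runMetaflac({
  path: 'assets/track.flac',
  tags: {
    TITLE: 'some song',
    ARTIST: ['artist a', 'artist b'], // arrays become repeated vorbis comments
    TRACKNUMBER: 1,
    LYRICS: null, // null values are skipped
  },
  coverPath: 'assets/cover.jpg',
})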

utils/misc.ts Normal file

@ -0,0 +1,16 @@
import 'dotenv/config'
export function getEnv(key: string): string
export function getEnv<T>(key: string, parser: (value: string) => T): T
export function getEnv<T>(key: string, parser?: (value: string) => T): T | string {
const value = process.env[key]
if (!value) throw new Error(`env variable ${key} not found`)
if (!parser) return value
return parser(value)
}
export function* chunks<T>(arr: T[], size: number) {
for (let i = 0; i < arr.length; i += size) {
yield arr.slice(i, i + size)
}
}

utils/mpd.ts Normal file

@ -0,0 +1,123 @@
import { assert, ConditionVariable } from '@fuman/utils'
import { load } from 'cheerio'
import { ffetch } from './fetch.ts'
import { writeWebStreamToFile } from './fs.ts'
interface SimpleMpd {
codecs: string
initUrl: string
segmentUrls: string[]
}
export function parseSimpleMpd(xml: string): SimpleMpd {
const $ = load(xml, { xml: true })
const period = $('Period')
assert(period.length === 1, 'expected exactly one period')
const adaptations = period.find('AdaptationSet')
assert(adaptations.length === 1, 'expected exactly one adaptation set')
const representation = adaptations.find('Representation')
assert(representation.length === 1, 'expected exactly one representation')
const segmentTemplate = representation.find('SegmentTemplate')
assert(segmentTemplate.length === 1, 'expected exactly one segment template')
const initUrl = segmentTemplate.attr('initialization')
const templateUrl = segmentTemplate.attr('media')
const startNum = segmentTemplate.attr('startNumber')
assert(initUrl !== undefined, 'expected initialization url')
assert(templateUrl !== undefined, 'expected template url')
assert(!templateUrl.match(/\$(RepresentationID|Bandwidth|Time)\$/), 'unsupported template url')
assert(startNum !== undefined, 'expected start number')
const timeline = segmentTemplate.find('SegmentTimeline')
assert(timeline.length === 1, 'expected exactly one segment timeline')
const segments = timeline.find('S')
assert(segments.length > 0, 'expected at least one segment')
const segmentUrls: string[] = []
let segmentNum = Number(startNum)
for (const segment of segments) {
const duration = $(segment).attr('d')
assert(duration !== undefined, 'expected duration')
const r = $(segment).attr('r')
const repeats = r ? Number.parseInt(r) + 1 : 1
for (let i = 0; i < repeats; i++) {
segmentUrls.push(templateUrl.replace('$Number$', String(segmentNum)))
segmentNum++
}
}
return {
codecs: representation.attr('codecs')!,
initUrl,
segmentUrls,
}
}
export function concatMpdSegments(options: {
mpd: SimpleMpd
fetch: (url: string) => Promise<Uint8Array>
poolSize?: number
}): ReadableStream {
const { mpd, fetch, poolSize = 8 } = options
// segment index -1 refers to the init segment; real media segments start at 0
let nextSegmentIdx = -1
let nextWorkerSegmentIdx = -1
const nextSegmentCv = new ConditionVariable()
const buffer: Record<number, Uint8Array> = {}
const downloadSegment = async (idx = nextWorkerSegmentIdx++) => {
// console.log('downloading segment %s', idx)
const url = idx === -1 ? mpd.initUrl : mpd.segmentUrls[idx]
const chunk = await fetch(url)
buffer[idx] = chunk
if (idx === nextSegmentIdx) {
nextSegmentCv.notify()
}
if (nextWorkerSegmentIdx < mpd.segmentUrls.length) {
return downloadSegment()
}
}
let error: unknown
void Promise.all(Array.from({
length: Math.min(poolSize, mpd.segmentUrls.length),
}, downloadSegment))
.catch((e) => {
error = e
nextSegmentCv.notify()
})
return new ReadableStream({
async start(controller) {
while (true) {
await nextSegmentCv.wait()
if (error) {
controller.error(error)
return
}
while (nextSegmentIdx in buffer) {
const buf = buffer[nextSegmentIdx]
delete buffer[nextSegmentIdx]
nextSegmentIdx++
controller.enqueue(buf)
}
if (nextSegmentIdx >= mpd.segmentUrls.length) {
controller.close()
return
}
}
},
})
}
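
sketch of tying it together with downloadStream from utils/fetch.ts (the manifest url is made up, and segment urls are assumed to be absolute):

import { downloadStream, ffetch } from './fetch.ts'
import { concatMpdSegments, parseSimpleMpd } from './mpd.ts'

const mpd = parseSimpleMpd(await ffetch('https://example.com/audio.mpd').text())
const stream = concatMpdSegments({
  mpd,
  fetch: async (url) => {
    const res = await ffetch(url)
    return new Uint8Array(await res.arrayBuffer())
  },
})
await downloadStream(stream, 'assets/audio.bin')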

utils/navidrome.ts Normal file

@ -0,0 +1,74 @@
import { z } from 'zod'
import { ffetch as ffetchBase } from './fetch.ts'
import { getEnv } from './misc.ts'
let _cachedFfetch: typeof ffetchBase | undefined
export async function getNavidromeFfetch() {
if (_cachedFfetch) return _cachedFfetch
const baseUrl = getEnv('NAVIDROME_ENDPOINT')
const authRes = await ffetchBase.post('/auth/login', {
baseUrl,
json: {
username: getEnv('NAVIDROME_USERNAME'),
password: getEnv('NAVIDROME_PASSWORD'),
},
}).parsedJson(z.object({ token: z.string() }))
_cachedFfetch = ffetchBase.extend({
baseUrl,
headers: {
'x-nd-authorization': `Bearer ${authRes.token}`,
},
})
return _cachedFfetch
}
export const NavidromeSong = z.object({
id: z.string(),
title: z.string(),
album: z.string(),
albumArtist: z.string(),
artist: z.string(),
path: z.string(),
libraryPath: z.string(),
duration: z.number(),
size: z.number(),
participants: z.object({
artist: z.object({
id: z.string(),
name: z.string(),
}).array().optional(),
}).optional(),
mbzRecordingID: z.string().optional(),
mbzReleaseTrackId: z.string().optional(),
mbzAlbumId: z.string().optional(),
mbzReleaseGroupId: z.string().optional(),
})
export type NavidromeSong = z.infer<typeof NavidromeSong>
export async function fetchSongs(offset: number, pageSize: number) {
const api = await getNavidromeFfetch()
return api('/api/song', {
query: {
_start: offset,
_end: offset + pageSize,
_order: 'ASC',
_sort: 'path',
},
}).parsedJson(z.array(NavidromeSong))
}
export async function* fetchSongsIter(params?: {
chunkSize?: number
onChunkProcessed?: (page: number, items: number) => void
}) {
const { chunkSize = 1000, onChunkProcessed } = params ?? {}
for (let offset = 0; ; offset += chunkSize) {
const songs = await fetchSongs(offset, chunkSize)
if (songs.length === 0) return
yield * songs
onChunkProcessed?.(Math.floor(offset / chunkSize), songs.length)
}
}
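
usage sketch (needs NAVIDROME_ENDPOINT / NAVIDROME_USERNAME / NAVIDROME_PASSWORD in env):

import { fetchSongsIter } from './navidrome.ts'

for await (const song of fetchSongsIter({
  chunkSize: 500,
  onChunkProcessed: (page, items) => console.log('page %d: %d songs', page, items),
})) {
  console.log(`${song.artist} - ${song.title}`)
}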

utils/oauth.ts Normal file

@ -0,0 +1,78 @@
import type { MaybePromise } from '@fuman/utils'
import * as fsp from 'node:fs/promises'
import { z } from 'zod'
export interface OauthStorage {
write: (value: string) => MaybePromise<void>
read: () => MaybePromise<string | null>
}
export class LocalOauthStorage implements OauthStorage {
constructor(private filename: string) {}
async write(value: string) {
await fsp.writeFile(this.filename, value)
}
async read() {
try {
return await fsp.readFile(this.filename, 'utf8')
} catch (e) {
return null
}
}
}
const OauthState = z.object({
accessToken: z.string(),
refreshToken: z.string().optional(),
expiresAt: z.number(),
})
type OauthState = z.infer<typeof OauthState>
export class OauthHandler {
constructor(private params: {
storage: OauthStorage
refreshToken: (refreshToken: string) => MaybePromise<{
accessToken: string
refreshToken: string
expiresIn: number
}>
/** number of milliseconds to subtract from token expiration time */
jitter?: number
}) {
this.params.jitter = this.params.jitter ?? 5000
}
#cache: OauthState | null = null
async readOauthState() {
if (this.#cache) return this.#cache
const value = await this.params.storage.read()
if (!value) return null
this.#cache = OauthState.parse(JSON.parse(value))
return this.#cache
}
async writeOauthState(value: OauthState) {
this.#cache = value
await this.params.storage.write(JSON.stringify(value))
}
async getAccessToken() {
const state = await this.readOauthState()
if (!state) return null
if (state.expiresAt < Date.now() + this.params.jitter!) {
if (!state.refreshToken) return null
const { accessToken, refreshToken, expiresIn } = await this.params.refreshToken(state.refreshToken)
await this.writeOauthState({
accessToken,
refreshToken,
expiresAt: Date.now() + expiresIn * 1000,
})
return accessToken
}
return state.accessToken
}
}
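
sketch of wiring OauthHandler to some provider's refresh endpoint (the endpoint and response fields here are hypothetical):

import { LocalOauthStorage, OauthHandler } from './oauth.ts'

const oauth = new OauthHandler({
  storage: new LocalOauthStorage('assets/example-oauth.json'),
  refreshToken: async (refreshToken) => {
    const res = await fetch('https://example.com/oauth/token', {
      method: 'POST',
      body: new URLSearchParams({ grant_type: 'refresh_token', refresh_token: refreshToken }),
    }).then(r => r.json())
    // map the provider's response to the shape OauthHandler expects
    return { accessToken: res.access_token, refreshToken: res.refresh_token, expiresIn: res.expires_in }
  },
})
const token = await oauth.getAccessToken() // null if no state was saved yet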

utils/strings.ts Normal file

@ -0,0 +1,54 @@
export function parseJsObject(str: string, offset = 0) {
let i = offset
const len = str.length
let start = -1
let end = -1
const depth = {
'{': 0,
'[': 0,
}
const possibleQuotes = {
'"': true,
'\'': true,
'`': true,
}
let inQuote: string | null = null
let escapeNextQuote = false
while (i < len) {
const char = str[i]
if (char in possibleQuotes && !escapeNextQuote) {
if (inQuote === null) {
inQuote = char
} else if (char === inQuote) {
inQuote = null
}
} else if (inQuote != null) {
escapeNextQuote = char === '\\' && !escapeNextQuote
} else if (inQuote == null && char in depth) {
if (start === -1) {
start = i
}
depth[char] += 1
} else if (inQuote == null && (
char === '}' || char === ']'
)) {
if (char === '}') depth['{'] -= 1
if (char === ']') depth['['] -= 1
if (depth['{'] === 0 && depth['['] === 0) {
end = i + 1
break
}
}
i += 1
}
if (start === -1 && end === -1) return null
if (depth['{'] !== 0 || depth['['] !== 0) throw new SyntaxError('Mismatched brackets')
if (inQuote) throw new SyntaxError('Unclosed string')
return str.substring(start, end)
}
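
example: pulling a state object out of an inline script; quoted braces and nested brackets are handled:

import { parseJsObject } from './strings.ts'

const html = 'window.__STATE__ = {"a": [1, 2], "b": "}"}; init();'
const objStr = parseJsObject(html)
console.log(objStr) // {"a": [1, 2], "b": "}"}
console.log(JSON.parse(objStr!).a) // [ 1, 2 ]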

utils/telegram.ts Normal file

@ -0,0 +1,11 @@
import { TelegramClient, type TelegramClientOptions } from '@mtcute/node'
import { getEnv } from './misc.ts'
export function createTg(session: string, extra?: Partial<TelegramClientOptions>) {
return new TelegramClient({
apiId: getEnv('TELEGRAM_API_ID', Number),
apiHash: getEnv('TELEGRAM_API_HASH'),
storage: `assets/${session}.session`,
...extra,
})
}

utils/webdav.ts Normal file

@ -0,0 +1,324 @@
import { ffetchBase, type FfetchResult } from '@fuman/fetch'
import { asNonNull, assert, base64, utf8 } from '@fuman/utils'
import { Parser } from 'htmlparser2'
import { z } from 'zod'
const XML_HEADER = '<?xml version="1.0" encoding="utf-8" ?>'
export interface WebdavClientOptions {
baseUrl: string
username?: string
password?: string
headers?: Record<string, string>
}
export interface WebdavResourceBase {
href: string
name: string
status: string
lastModified?: Date
raw: Record<string, unknown>
// todo: lockdiscovery
// todo: supportedlock
}
export interface WebdavCollection extends WebdavResourceBase {
type: 'collection'
}
export interface WebdavFile extends WebdavResourceBase {
type: 'file'
size: number
etag?: string
contentType?: string
}
export type WebdavResource = WebdavCollection | WebdavFile
const DResponseSchema = z.object({
'd:href': z.string(),
'd:propstat': z.object({
'd:prop': z.object({
'd:resourcetype': z.union([
z.literal(true),
z.object({
'd:collection': z.literal(true),
}),
]),
'd:displayname': z.union([z.literal(true), z.string()]),
'd:getcontentlength': z.coerce.number().optional(),
'd:getlastmodified': z.string().transform(v => new Date(v)).optional(),
'd:getetag': z.string().optional(),
'd:getcontenttype': z.string().optional(),
}).passthrough(),
'd:status': z.string(),
}),
})
const DMultistatusSchema = z.object({
'd:multistatus': z.tuple([z.object({
'd:response': z.array(DResponseSchema),
})]),
})
function escapeXml(str: string) {
return str.replace(/</g, '&lt;').replace(/>/g, '&gt;')
}
function xmlToJson(xml: string) {
const res: Record<string, any[]> = {}
const stack: any[] = [res]
const parser = new Parser({
onopentag(name) {
name = name.toLowerCase()
const node: any = {}
const top = stack[stack.length - 1]
if (!top[name]) {
top[name] = []
}
top[name].push(node)
stack.push(node)
},
onclosetag(name) {
const obj = stack.pop()
const top = stack[stack.length - 1]
const ourIdx = top[name].length - 1
const keys = Object.keys(obj)
if (keys.length === 1 && keys[0] === '_text') {
top[name][ourIdx] = obj._text
} else if (keys.length === 0) {
top[name][ourIdx] = true
} else {
// replace one-element arrays with the element itself
for (const key of keys) {
if (key === 'd:response') continue
const val = obj[key]
if (Array.isArray(val) && val.length === 1) {
obj[key] = val[0]
}
}
}
},
ontext(text) {
const top = stack[stack.length - 1]
if (top._text === undefined) {
top._text = ''
}
top._text += text
},
})
parser.write(xml)
parser.end()
return res
}
export class WebdavClient {
readonly ffetch: typeof ffetchBase
readonly basePath
constructor(options: WebdavClientOptions) {
const headers: Record<string, string> = {
'Content-Type': 'application/xml; charset="utf-8"',
...options.headers,
}
if (options.username) {
let authStr = options.username
if (options.password) {
authStr += `:${options.password}`
}
headers.Authorization = `Basic ${base64.encode(utf8.encoder.encode(authStr))}`
}
this.ffetch = ffetchBase.extend({
baseUrl: options.baseUrl,
headers,
})
this.basePath = new URL(options.baseUrl).pathname
if (this.basePath[this.basePath.length - 1] !== '/') {
this.basePath += '/'
}
}
mapPropfindResponse = (obj: z.infer<typeof DResponseSchema>): WebdavResource => {
const name = obj['d:propstat']['d:prop']['d:displayname']
const base: WebdavResourceBase = {
href: obj['d:href'],
name: name === true ? '' : name,
status: obj['d:propstat']['d:status'],
lastModified: obj['d:propstat']['d:prop']['d:getlastmodified'],
raw: obj['d:propstat']['d:prop'],
}
if (base.href.startsWith(this.basePath)) {
base.href = base.href.slice(this.basePath.length)
if (base.href !== '/') {
base.href = `/${base.href}`
}
}
if (typeof obj['d:propstat']['d:prop']['d:resourcetype'] === 'object' && obj['d:propstat']['d:prop']['d:resourcetype']['d:collection']) {
const res = base as WebdavCollection
res.type = 'collection'
return res
} else {
const res = base as WebdavFile
res.type = 'file'
res.size = asNonNull(obj['d:propstat']['d:prop']['d:getcontentlength'])
res.etag = obj['d:propstat']['d:prop']['d:getetag']
res.contentType = obj['d:propstat']['d:prop']['d:getcontenttype']
return res
}
}
async propfind(
path: string,
params?: {
depth?: number | 'infinity'
properties?: string[]
},
): Promise<WebdavResource[]> {
const body = params?.properties
? [
XML_HEADER,
'<d:propfind xmlns:d="DAV:">',
'<d:prop>',
...params.properties.map(prop => `<${prop}/>`),
'</d:prop>',
'</d:propfind>',
].join('\n')
: undefined
const res = await this.ffetch(path, {
method: 'PROPFIND',
headers: {
Depth: params?.depth ? String(params.depth) : '1',
},
body,
}).text()
const json = DMultistatusSchema.parse(xmlToJson(res))
return json['d:multistatus'][0]['d:response'].map(this.mapPropfindResponse)
}
async proppatch(path: string, params: {
set?: Record<string, string | { _xml: string }>
remove?: string[]
}): Promise<void> {
if (!params.set && !params.remove) return
const lines: string[] = [
XML_HEADER,
'<d:propertyupdate xmlns:d="DAV:">',
]
if (params.set) {
lines.push('<d:set>')
for (const [key, value] of Object.entries(params.set ?? {})) {
lines.push(`<d:prop><${key}>${
typeof value === 'object' ? value._xml : escapeXml(value)
}</${key}></d:prop>`)
}
lines.push('</d:set>')
}
if (params.remove) {
lines.push('<d:remove>')
for (const key of params.remove) {
lines.push(`<d:prop><${key}/></d:prop>`)
}
lines.push('</d:remove>')
}
lines.push('</d:propertyupdate>')
const body = lines.join('\n')
await this.ffetch(path, {
method: 'PROPPATCH',
body,
})
}
async mkcol(path: string): Promise<void> {
const res = await this.ffetch(path, {
method: 'MKCOL',
})
if (res.status !== 201) throw new Error(`mkcol failed: ${res.status}`)
}
async delete(path: string): Promise<void> {
const res = await this.ffetch(path, {
method: 'DELETE',
})
if (res.status !== 204) throw new Error(`delete failed: ${res.status}`)
}
get(path: string): FfetchResult {
return this.ffetch(path, {
method: 'GET',
})
}
async put(path: string, body: BodyInit): Promise<void> {
await this.ffetch(path, {
method: 'PUT',
body,
})
}
async copy(
source: string,
destination: string,
params?: {
/** whether to overwrite the destination if it exists */
overwrite?: boolean
depth?: number | 'infinity'
},
): Promise<void> {
if (destination[0] === '/') destination = destination.slice(1)
if (this.basePath) destination = this.basePath + destination
const headers: Record<string, string> = {
Destination: destination,
}
if (params?.overwrite !== true) {
headers.Overwrite = 'F'
}
if (params?.depth) {
headers.Depth = String(params.depth)
}
const res = await this.ffetch(source, {
method: 'COPY',
headers,
})
if (res.status !== 201) throw new Error(`copy failed: ${res.status}`)
}
async move(
source: string,
destination: string,
params?: {
/** whether to overwrite the destination if it exists */
overwrite?: boolean
depth?: number | 'infinity'
},
): Promise<void> {
if (destination[0] === '/') destination = destination.slice(1)
if (this.basePath) destination = this.basePath + destination
const headers: Record<string, string> = {
Destination: destination,
}
if (params?.overwrite !== true) {
headers.Overwrite = 'F'
}
if (params?.depth) {
headers.Depth = String(params.depth)
}
const res = await this.ffetch(source, {
method: 'MOVE',
headers,
})
if (res.status !== 201) throw new Error(`move failed: ${res.status}`)
}
}
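
usage sketch against a made-up dav endpoint:

import { WebdavClient } from './webdav.ts'

const dav = new WebdavClient({
  baseUrl: 'https://dav.example.com/remote.php/dav/files/user/',
  username: 'user',
  password: 'hunter2',
})

await dav.put('/notes.txt', 'hello world')
for (const entry of await dav.propfind('/', { depth: 1 })) {
  console.log(entry.type === 'collection' ? 'dir ' : 'file', entry.href)
}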

utils/whoisxmlapi.ts Normal file

@ -0,0 +1,109 @@
import { sleep } from '@fuman/utils'
import { z } from 'zod'
import { ffetch } from './fetch.ts'
import { getEnv } from './misc.ts'
// https://whois.whoisxmlapi.com/documentation/output
// not all fields are present currently
const WhoisResultSchema = z.object({
registrant: z.object({
organization: z.string(),
name: z.string(),
email: z.string(),
}).partial().optional(),
administrativeContact: z.object({
organization: z.string(),
name: z.string(),
email: z.string(),
}).partial().optional(),
technicalContact: z.object({
organization: z.string(),
name: z.string(),
email: z.string(),
}).partial().optional(),
registryData: z.object({
registrant: z.object({
organization: z.string(),
name: z.string(),
email: z.string(),
}).partial().optional(),
registrarName: z.string(),
createdDate: z.string(),
updatedDate: z.string(),
expiresDate: z.string(),
}).partial().optional(),
registrarName: z.string().optional(),
createdDate: z.string().optional(),
updatedDate: z.string().optional(),
expiresDate: z.string().optional(),
})
export type WhoisResult = z.infer<typeof WhoisResultSchema>
const WhoisWrapSchema = z.object({
domainName: z.string(),
domainStatus: z.enum(['I', 'N']),
whoisRecord: WhoisResultSchema.optional(),
})
export async function bulkWhois(domains: string[]) {
const res = await ffetch.post('https://www.whoisxmlapi.com/BulkWhoisLookup/bulkServices/bulkWhois', {
json: {
apiKey: getEnv('WHOISXMLAPI_TOKEN'),
domains,
outputFormat: 'JSON',
},
}).parsedJson(z.object({
requestId: z.string(),
}))
while (true) {
const res2 = await ffetch.post('https://www.whoisxmlapi.com/BulkWhoisLookup/bulkServices/getRecords', {
json: {
apiKey: getEnv('WHOISXMLAPI_TOKEN'),
requestId: res.requestId,
outputFormat: 'JSON',
maxRecords: 1,
},
}).parsedJson(z.object({
recordsLeft: z.number(),
}))
if (res2.recordsLeft !== 0) {
await sleep(1000)
continue
}
break
}
const result = new Map<string, WhoisResult | null>()
const finalRes = await ffetch.post('https://www.whoisxmlapi.com/BulkWhoisLookup/bulkServices/getRecords', {
json: {
apiKey: getEnv('WHOISXMLAPI_TOKEN'),
requestId: res.requestId,
outputFormat: 'JSON',
},
}).parsedJson(z.object({
whoisRecords: z.array(WhoisWrapSchema),
}))
for (const record of finalRes.whoisRecords) {
result.set(record.domainName, record.domainStatus === 'I' ? record.whoisRecord ?? null : null)
}
return result
}
export async function whois(domain: string) {
const res = await ffetch.post('https://www.whoisxmlapi.com/whoisserver/WhoisService', {
json: {
domainName: domain,
outputFormat: 'JSON',
apiKey: getEnv('WHOISXMLAPI_TOKEN'),
},
}).parsedJson(z.object({
WhoisRecord: WhoisResultSchema.optional(),
}))
return res.WhoisRecord ?? null
}
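
usage sketch (needs WHOISXMLAPI_TOKEN in env):

import { bulkWhois, whois } from './whoisxmlapi.ts'

const record = await whois('example.com')
console.log(record?.registrarName, record?.expiresDate)

const bulk = await bulkWhois(['example.com', 'example.org'])
for (const [domain, rec] of bulk) {
  console.log(domain, rec?.registryData?.registrarName ?? 'no data')
}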

utils/xml.ts Normal file

@ -0,0 +1,20 @@
import type { ChildNode } from 'domhandler'
import { DomHandler } from 'domhandler'
import { Parser } from 'htmlparser2'
export function xmlToDom(xml: string) {
let _error: Error | null = null
let _dom: ChildNode[] | null = null
const handler = new DomHandler((error, dom) => {
_error = error
_dom = dom
})
const parser = new Parser(handler)
parser.write(xml)
parser.end()
if (_error) throw _error
return _dom!
}
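
tiny example (isTag is domhandler's element type guard):

import { isTag } from 'domhandler'
import { xmlToDom } from './xml.ts'

const [root] = xmlToDom('<feed><entry id="1"/></feed>')
if (isTag(root)) {
  console.log(root.name, root.children.length) // feed 1
}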