chore: update public repo

This commit is contained in:
desu-bot 2025-08-14 09:21:11 +00:00
parent 56472e5520
commit 6c1fe8a13c
7 changed files with 1625 additions and 95 deletions

View file

@@ -11,20 +11,28 @@
"@types/better-sqlite3": "^7.6.12", "@types/better-sqlite3": "^7.6.12",
"@types/plist": "^3.0.5", "@types/plist": "^3.0.5",
"@types/spinnies": "^0.5.3", "@types/spinnies": "^0.5.3",
"babel-generator": "^6.26.1",
"babel-traverse": "^6.26.0",
"babylon": "^6.18.0",
"better-sqlite3": "^11.8.1", "better-sqlite3": "^11.8.1",
"canvas": "^3.1.0", "canvas": "^3.1.0",
"cheerio": "^1.0.0", "cheerio": "^1.0.0",
"egoroof-blowfish": "4.0.1", "egoroof-blowfish": "4.0.1",
"es-main": "^1.3.0", "es-main": "^1.3.0",
"filesize": "^10.1.6", "filesize": "^10.1.6",
"imapflow": "^1.0.193",
"json5": "^2.2.3", "json5": "^2.2.3",
"kuromoji": "^0.1.2", "kuromoji": "^0.1.2",
"mailparser": "^3.7.4",
"nanoid": "^5.0.9", "nanoid": "^5.0.9",
"node-libcurl-ja3": "^5.0.3",
"patchright": "^1.52.5",
"plist": "^3.1.0", "plist": "^3.1.0",
"qrcode-terminal": "^0.12.0", "qrcode-terminal": "^0.12.0",
"spinnies": "^0.5.1", "spinnies": "^0.5.1",
"tough-cookie": "^5.0.0", "tough-cookie": "^5.0.0",
"tough-cookie-file-store": "^2.0.3", "tough-cookie-file-store": "^2.0.3",
"ts-morph": "^26.0.0",
"tsx": "^4.19.2", "tsx": "^4.19.2",
"undici": "^7.2.0", "undici": "^7.2.0",
"wanakana": "^5.3.1" "wanakana": "^5.3.1"
@@ -33,6 +41,7 @@
"@antfu/eslint-config": "3.10.0", "@antfu/eslint-config": "3.10.0",
"@fuman/fetch": "0.1.0", "@fuman/fetch": "0.1.0",
"@fuman/utils": "0.0.14", "@fuman/utils": "0.0.14",
"@types/mailparser": "^3.4.6",
"@types/node": "22.10.0", "@types/node": "22.10.0",
"domhandler": "^5.0.3", "domhandler": "^5.0.3",
"dotenv": "16.4.5", "dotenv": "16.4.5",
@@ -43,6 +52,7 @@
"pnpm": { "pnpm": {
"onlyBuiltDependencies": [ "onlyBuiltDependencies": [
"better-sqlite3", "better-sqlite3",
"node-libcurl-ja3",
"canvas" "canvas"
] ]
} }

1464
pnpm-lock.yaml generated

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,22 @@
import { fetchSongsIter } from '../../../utils/navidrome.ts'

// library paths that are deliberately excluded from the mbz-id audit
const IGNORE_PATHS = [
  's3/Electronic/_Compilations/keygenjukebox/',
]

// every properly-tagged track should carry all of these musicbrainz ids
const MBZ_FIELDS = ['mbzRecordingID', 'mbzReleaseTrackId', 'mbzAlbumId', 'mbzReleaseGroupId'] as const

let missingCount = 0
for await (const song of fetchSongsIter()) {
  const isIgnored = IGNORE_PATHS.some(prefix => song.path.startsWith(prefix))
  if (isIgnored) {
    continue
  }

  // report only the first missing field per track, counting the track once
  const missingField = MBZ_FIELDS.find(field => !song[field])
  if (missingField !== undefined) {
    console.log('found missing %s: %s - %s (%s)', missingField, song.artist, song.title, song.path)
    missingCount++
  }
}
console.log('found %d tracks without mbz ids', missingCount)

View file

@@ -0,0 +1,21 @@
import { fetchSongsIter } from '../../../utils/navidrome.ts'

// artist names that legitimately contain separator-like substrings
const WHITELIST_ARTISTS = new Set([
  'betwixt & between',
  '10th avenue cafe/tak',
  'overmind and potatoes',
])

// separators that usually indicate several artists crammed into one tag
const SEPARATOR_RE = /, | and | & |\/| x | feat\. /i

let suspectCount = 0
for await (const song of fetchSongsIter()) {
  // only flag tracks where the tag credits at most one participant,
  // since multi-participant tracks already have the split recorded
  const singleCredited = !song.participants?.artist || song.participants.artist.length === 1
  const looksCombined = SEPARATOR_RE.test(song.artist)
  const whitelisted = WHITELIST_ARTISTS.has(song.artist.toLowerCase())

  if (singleCredited && looksCombined && !whitelisted) {
    console.log('possible multiartist: %s - %s (%s)', song.artist, song.title, song.path)
    suspectCount++
  }
}
console.log('found %d possible multiartists', suspectCount)

View file

@@ -311,12 +311,93 @@ function getTrackName(track: GwTrack) {
return name return name
} }
// todo
// async function resolveMusicbrainzIds(albumId: number) {
// const deezerUrl = `https://www.deezer.com/album/${albumId}`
// // try odesli api to fetch extra links
// const odesliRes = await ffetch('https://api.song.link/v1-alpha.1/links', {
// query: {
// url: deezerUrl,
// key: '71d7be8a-3a76-459b-b21e-8f0350374984',
// },
// }).parsedJson(z.object({
// linksByPlatform: z.record(z.string(), z.object({
// url: z.string(),
// })),
// })).catch(() => null)
// const urls = [deezerUrl]
// if (odesliRes) {
// for (const { url } of Object.values(odesliRes.linksByPlatform)) {
// urls.push(url)
// }
// }
// // try to resolve musicbrainz album id
// const mbRes1 = await ffetch('https://musicbrainz.org/ws/2/url', {
// query: {
// resource: urls,
// inc: 'release-rels',
// },
// }).parsedJson(z.object({
// urls: z.array(z.object({
// relations: z.array(z.any()),
// })),
// }))
// const uniqueMbIds = new Set<string>()
// for (const { relations } of mbRes1.urls) {
// for (const rel of relations) {
// if (rel['target-type'] !== 'release') continue
// uniqueMbIds.add(rel.release.id)
// }
// }
// if (uniqueMbIds.size === 0) return null
// const releaseMbId = uniqueMbIds.values().next().value
// // resolve the rest of the ids from the release
// const releaseRes = await ffetch(`https://musicbrainz.org/ws/2/release/${releaseMbId}`, {
// query: {
// inc: 'artists recordings',
// },
// }).parsedJson(z.object({
// 'artist-credit': z.array(z.object({
// artist: z.object({
// id: z.string(),
// }),
// })).optional(),
// 'media': z.array(z.object({
// id: z.string(),
// tracks: z.array(z.object({
// position: z.number(),
// title: z.string(),
// id: z.string(),
// recording: z.object({
// id: z.string(),
// }),
// })),
// })).optional(),
// }))
// return {
// release: releaseMbId,
// artists: releaseRes['artist-credit']?.map(it => it.artist.id) ?? [],
// tracks: releaseRes['media']?.[0]
// }
// }
async function downloadTrack(track: GwTrack, opts: { async function downloadTrack(track: GwTrack, opts: {
destination: string destination: string
album?: GwAlbum album?: GwAlbum
}) { }) {
const albumUrl = `https://cdn-images.dzcdn.net/images/cover/${track.ALB_PICTURE}/1500x1500-000000-80-0-0.jpg` const albumUrl = `https://cdn-images.dzcdn.net/images/cover/${track.ALB_PICTURE}/1500x1500-000000-80-0-0.jpg`
const [getUrlRes, albumAb, lyricsRes] = await Promise.all([ const [
getUrlRes,
albumAb,
lyricsRes,
] = await Promise.all([
ffetch.post('https://media.deezer.com/v1/get_url', { ffetch.post('https://media.deezer.com/v1/get_url', {
json: { json: {
license_token: userData.USER.OPTIONS.license_token, license_token: userData.USER.OPTIONS.license_token,
@@ -343,6 +424,8 @@ async function downloadTrack(track: GwTrack, opts: {
}), }),
]) ])
// console.dir(getUrlRes, { depth: null })
const albumCoverPath = join(`assets/deezer-tmp-${track.SNG_ID}.jpg`) const albumCoverPath = join(`assets/deezer-tmp-${track.SNG_ID}.jpg`)
await writeFile(albumCoverPath, new Uint8Array(albumAb)) await writeFile(albumCoverPath, new Uint8Array(albumAb))
@@ -487,6 +570,10 @@ async function downloadTrack(track: GwTrack, opts: {
params.push(`--set-tag=COPYRIGHT=${opts.album.COPYRIGHT}`) params.push(`--set-tag=COPYRIGHT=${opts.album.COPYRIGHT}`)
} }
if (lyricsLrc) {
params.push(`--set-tag=LYRICS=${lyricsLrc}`)
}
params.push(filename) params.push(filename)
await $`metaflac ${params}` await $`metaflac ${params}`
@@ -605,9 +692,10 @@ async function downloadArtist(artistId: string) {
spinnies.succeed('collect', { text: `collected ${albums.length} albums with a total of ${trackCount} tracks` }) spinnies.succeed('collect', { text: `collected ${albums.length} albums with a total of ${trackCount} tracks` })
} }
// fixme: singles should always contain artist name and be saved in artist root dir // fixme: "featured" albums/tracks (i.e. when main artist of the album is not the one we're dling) should have album artist name in its dirname
// fixme: "featured" albums (i.e. when main artist of the album is not the one we're dling) should have album artist name in its dirname // fixme: singles should be saved in artist root dir
// todo: automatic musicbrainz matching // todo: automatic musicbrainz matching
// todo: automatic genius/musixmatch matching for lyrics if unavailable directly from deezer
await asyncPool(albums, async (alb) => { await asyncPool(albums, async (alb) => {
const tracks = await gwLightApi({ const tracks = await gwLightApi({
@@ -784,6 +872,14 @@ if (url.match(/^(artist|album|track):(\d+)$/)) {
node: z.object({ node: z.object({
id: z.string(), id: z.string(),
title: z.string(), title: z.string(),
contributors: z.object({
edges: z.array(z.object({
node: z.object({
id: z.string(),
name: z.string(),
}),
})),
}),
}), }),
})), })),
}), }),
@@ -801,7 +897,7 @@ if (url.match(/^(artist|album|track):(\d+)$/)) {
} }
for (const [i, { node }] of iter.enumerate(searchResult.instantSearch.results.tracks.edges)) { for (const [i, { node }] of iter.enumerate(searchResult.instantSearch.results.tracks.edges)) {
console.log(`track:${node.id}: ${node.title}`) console.log(`track:${node.id}: ${node.contributors.edges.map(it => it.node.name).join(', ')} - ${node.title}`)
} }
const uri = await question('option > ') const uri = await question('option > ')

View file

@@ -1,87 +0,0 @@
import { sleep } from '@fuman/utils'
import { z } from 'zod'
import { ffetch } from './fetch.ts'
import { getEnv } from './misc.ts'
// Response shape of capmonster.cloud `createTask`: a non-zero `errorId`
// signals failure (with `errorCode` naming the reason), otherwise `taskId`
// identifies the task for later polling.
const CreateTaskResponse = z.object({
  errorId: z.number(),
  errorCode: z.string().optional().nullable(),
  taskId: z.number(),
})

// Response shape of capmonster.cloud `getTaskResult`: `solution` is only
// populated once `status` is 'ready'; its inner shape depends on the task
// type, so it stays `unknown` and callers narrow it themselves.
const GetTaskResultResponse = z.object({
  errorId: z.number(),
  errorCode: z.string().optional().nullable(),
  status: z.enum(['ready', 'processing']),
  solution: z.unknown().optional(),
})
/**
 * Submit a captcha-solving task to capmonster.cloud and poll until a
 * solution is available.
 *
 * @param task  task payload in capmonster's `createTask` format
 * @returns the raw `solution` object reported by the service
 * @throws if the service reports an error or the polling cap is exceeded
 */
export async function solveCaptcha(task: unknown) {
  const createRes = await ffetch.post('https://api.capmonster.cloud/createTask', {
    json: {
      clientKey: getEnv('CAPMONSTER_API_TOKEN'),
      task,
    },
  }).parsedJson(CreateTaskResponse)
  if (createRes.errorId) {
    throw new Error(`createTask error ${createRes.errorId}: ${createRes.errorCode}`)
  }

  const taskId = createRes.taskId

  // give the solver a head start before the first poll
  await sleep(5_000)

  // "Limit: 120 requests per task. If the limit is exceeded, the user's
  // account may be temporarily locked." — cap at 100 just to be safe.
  for (let attempt = 0; attempt < 100; attempt++) {
    const pollRes = await ffetch.post('https://api.capmonster.cloud/getTaskResult', {
      json: {
        clientKey: getEnv('CAPMONSTER_API_TOKEN'),
        taskId,
      },
    }).parsedJson(GetTaskResultResponse)
    if (pollRes.errorId) {
      throw new Error(`getTaskResult error ${pollRes.errorId}: ${pollRes.errorCode}`)
    }
    if (pollRes.status === 'ready') {
      return pollRes.solution
    }
    await sleep(2_000)
  }

  throw new Error('captcha request count exceeded')
}
/**
 * Solve a reCAPTCHA v2 challenge through capmonster's proxyless task type.
 *
 * @param params  challenge details (page url, site key, optional data-s
 *   value, user agent, cookies, and invisible-mode flag)
 * @returns the `g-recaptcha-response` token to submit with the form
 * @throws if the solver returns anything other than a token object
 */
export async function solveRecaptcha(params?: {
  url: string
  siteKey: string
  s?: string
  userAgent?: string
  cookies?: string
  isInvisible?: boolean
}) {
  const solution = await solveCaptcha({
    type: 'RecaptchaV2TaskProxyless',
    websiteURL: params?.url,
    websiteKey: params?.siteKey,
    recaptchaDataSValue: params?.s,
    userAgent: params?.userAgent,
    cookies: params?.cookies,
    isInvisible: params?.isInvisible,
  })

  // narrow the unknown solution down to `{ gRecaptchaResponse: string }`
  if (
    typeof solution !== 'object'
    || !solution
    || !('gRecaptchaResponse' in solution)
    || typeof solution.gRecaptchaResponse !== 'string'
  ) {
    throw new Error('invalid recaptcha response')
  }
  return solution.gRecaptchaResponse
}

View file

@@ -33,6 +33,16 @@ export const NavidromeSong = z.object({
libraryPath: z.string(), libraryPath: z.string(),
duration: z.number(), duration: z.number(),
size: z.number(), size: z.number(),
participants: z.object({
artist: z.object({
id: z.string(),
name: z.string(),
}).array().optional(),
}).optional(),
mbzRecordingID: z.string().optional(),
mbzReleaseTrackId: z.string().optional(),
mbzAlbumId: z.string().optional(),
mbzReleaseGroupId: z.string().optional(),
}) })
export type NavidromeSong = z.infer<typeof NavidromeSong> export type NavidromeSong = z.infer<typeof NavidromeSong>
@@ -43,7 +53,7 @@ export async function fetchSongs(offset: number, pageSize: number) {
_start: offset, _start: offset,
_end: offset + pageSize, _end: offset + pageSize,
_order: 'ASC', _order: 'ASC',
_sort: 'title', _sort: 'path',
}, },
}).parsedJson(z.array(NavidromeSong)) }).parsedJson(z.array(NavidromeSong))
} }