diff --git a/lib/globalScope.ts b/lib/globalScope.ts index 30cb40cb..27c97e69 100644 --- a/lib/globalScope.ts +++ b/lib/globalScope.ts @@ -13,6 +13,7 @@ import type { import type { IFileAction, IFileListAction } from './ui/actions/index.ts' import type { FilesRegistry } from './ui/registry.ts' import type { ISidebarAction, ISidebarTab } from './ui/sidebar/index.ts' +import type { Uploader } from './upload/index.ts' interface InternalGlobalScope { davNamespaces?: DavProperty @@ -22,6 +23,8 @@ interface InternalGlobalScope { navigation?: Navigation registry?: FilesRegistry + uploader?: Uploader + fileActions?: Map fileListActions?: Map fileListFilters?: Map diff --git a/lib/upload/errors/UploadCancelledError.spec.ts b/lib/upload/errors/UploadCancelledError.spec.ts new file mode 100644 index 00000000..e270eff7 --- /dev/null +++ b/lib/upload/errors/UploadCancelledError.spec.ts @@ -0,0 +1,17 @@ +/*! + * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { expect, test } from 'vitest' +import { UploadCancelledError } from './UploadCancelledError.ts' + +test('UploadCancelledError', () => { + const cause = new Error('Network error') + const error = new UploadCancelledError(cause) + expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadCancelledError) + expect(error.message).toBe('Upload has been cancelled') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_CANCELLED__') +}) diff --git a/lib/upload/errors/UploadCancelledError.ts b/lib/upload/errors/UploadCancelledError.ts new file mode 100644 index 00000000..cce917d5 --- /dev/null +++ b/lib/upload/errors/UploadCancelledError.ts @@ -0,0 +1,12 @@ +/*! 
+ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +export class UploadCancelledError extends Error { + private __UPLOAD_CANCELLED__ = true + + public constructor(cause?: unknown) { + super('Upload has been cancelled', { cause }) + } +} diff --git a/lib/upload/errors/UploadFailedError.spec.ts b/lib/upload/errors/UploadFailedError.spec.ts new file mode 100644 index 00000000..1a16aa8f --- /dev/null +++ b/lib/upload/errors/UploadFailedError.spec.ts @@ -0,0 +1,44 @@ +/*! + * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { AxiosRequestHeaders, AxiosResponse } from 'axios' + +import { AxiosError } from 'axios' +import { expect, test } from 'vitest' +import { UploadFailedError } from './UploadFailedError.ts' + +test('UploadFailedError - axios error but no response', () => { + const cause = new AxiosError('Network error') + const error = new UploadFailedError(cause) + expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadFailedError) + expect(error.message).toBe('Upload has failed') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_FAILED__') + expect(error.response).toBeUndefined() +}) + +test('UploadFailedError - axios error', () => { + const response = {} as AxiosResponse + const cause = new AxiosError('Network error', '200', { headers: {} as AxiosRequestHeaders }, {}, response) + const error = new UploadFailedError(cause) + expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadFailedError) + expect(error.message).toBe('Upload has failed') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_FAILED__') + expect(error.response).toBe(response) +}) + +test('UploadFailedError - generic error', () => { + const cause = new Error('Generic error') + const error = new UploadFailedError(cause) + 
expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadFailedError) + expect(error.message).toBe('Upload has failed') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_FAILED__') + expect(error.response).toBeUndefined() +}) diff --git a/lib/upload/errors/UploadFailedError.ts b/lib/upload/errors/UploadFailedError.ts new file mode 100644 index 00000000..e85b3f78 --- /dev/null +++ b/lib/upload/errors/UploadFailedError.ts @@ -0,0 +1,21 @@ +/*! + * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { AxiosResponse } from '@nextcloud/axios' + +import { isAxiosError } from '@nextcloud/axios' + +export class UploadFailedError extends Error { + private __UPLOAD_FAILED__ = true + + readonly response?: AxiosResponse + + public constructor(cause?: unknown) { + super('Upload has failed', { cause }) + if (isAxiosError(cause) && cause.response) { + this.response = cause.response + } + } +} diff --git a/lib/upload/getUploader.spec.ts b/lib/upload/getUploader.spec.ts new file mode 100644 index 00000000..d5855974 --- /dev/null +++ b/lib/upload/getUploader.spec.ts @@ -0,0 +1,31 @@ +/*! 
+ * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { join } from '@nextcloud/paths' +import { expect, test } from 'vitest' +import { defaultRemoteURL, defaultRootPath } from '../dav/dav.ts' +import { scopedGlobals } from '../globalScope.ts' +import { Folder } from '../node/folder.ts' +import { getUploader } from './getUploader.ts' +import { Uploader } from './uploader/Uploader.ts' + +test('getUploader - should return the uploader instance from the global scope', async () => { + const uploader = new Uploader(false, new Folder({ owner: 'test', root: defaultRootPath, source: join(defaultRemoteURL, defaultRootPath) })) + scopedGlobals.uploader = uploader + const returnedUploader = getUploader() + expect(returnedUploader).toBe(uploader) +}) + +test('getUploader - should return the same instance on multiple calls', async () => { + const uploader1 = getUploader() + const uploader2 = getUploader() + expect(uploader1).toBe(uploader2) +}) + +test('getUploader - should not return the same instance on multiple calls with forceRecreate', async () => { + const uploader1 = getUploader(true) + const uploader2 = getUploader(true, true) + expect(uploader1).not.toBe(uploader2) +}) diff --git a/lib/upload/getUploader.ts b/lib/upload/getUploader.ts new file mode 100644 index 00000000..ed28bfd7 --- /dev/null +++ b/lib/upload/getUploader.ts @@ -0,0 +1,26 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { isPublicShare } from '@nextcloud/sharing/public' +import { scopedGlobals } from '../globalScope.ts' +import { Uploader } from './uploader/Uploader.ts' + +/** + * Get the global Uploader instance. 
+ * + * Note: If you need a local uploader you can just create a new instance, + * this global instance will be shared with other apps and is mostly useful + * for the Files app web UI to keep track of all uploads and their progress. + * + * @param isPublic Set to true to use public upload endpoint (by default it is auto detected) + * @param forceRecreate Force a new uploader instance - main purpose is for testing + */ +export function getUploader(isPublic: boolean = isPublicShare(), forceRecreate = false): Uploader { + if (forceRecreate || scopedGlobals.uploader === undefined) { + scopedGlobals.uploader = new Uploader(isPublic) + } + + return scopedGlobals.uploader +} diff --git a/lib/upload/index.ts b/lib/upload/index.ts new file mode 100644 index 00000000..8d749dee --- /dev/null +++ b/lib/upload/index.ts @@ -0,0 +1,9 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +export { UploadCancelledError } from './errors/UploadCancelledError.ts' +export { UploadFailedError } from './errors/UploadFailedError.ts' +export * from './uploader/index.ts' +export { getUploader } from './getUploader.ts' diff --git a/lib/upload/uploader/Eta.spec.ts b/lib/upload/uploader/Eta.spec.ts new file mode 100644 index 00000000..f5863a2c --- /dev/null +++ b/lib/upload/uploader/Eta.spec.ts @@ -0,0 +1,303 @@ +/*! 
+ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { afterAll, beforeAll, describe, expect, it, test, vi } from 'vitest' +import { Eta, EtaStatus } from './Eta.ts' + +describe('ETA - status', () => { + it('has default set', () => { + const eta = new Eta() + expect(eta.progress).toBe(0) + expect(eta.time).toBe(Infinity) + expect(eta.speed).toBe(-1) + expect(eta.status).toBe(EtaStatus.Idle) + }) + + it('can autostart in constructor', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + expect(eta.progress).toBe(0) + expect(eta.time).toBe(Infinity) + expect(eta.speed).toBe(-1) + }) + + it('can reset', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + + eta.add(10) + expect(eta.progress).toBe(10) + + eta.reset() + expect(eta.status).toBe(EtaStatus.Idle) + expect(eta.progress).toBe(0) + }) + + it('does not update when idle', () => { + const eta = new Eta() + expect(eta.progress).toBe(0) + + eta.update(10, 100) + expect(eta.progress).toBe(0) + + eta.add(10) + expect(eta.progress).toBe(0) + expect(eta.status).toBe(EtaStatus.Idle) + }) + + it('does not update when paused', () => { + const eta = new Eta({ start: true, total: 100 }) + eta.add(10) + expect(eta.progress).toBe(10) + + eta.pause() + eta.add(10) + expect(eta.progress).toBe(10) + expect(eta.status).toBe(EtaStatus.Paused) + }) + + it('can resume', () => { + const eta = new Eta() + expect(eta.status).toBe(EtaStatus.Idle) + eta.resume() + expect(eta.status).toBe(EtaStatus.Running) + }) +}) + +describe('ETA - progress', () => { + beforeAll(() => vi.useFakeTimers()) + afterAll(() => vi.useRealTimers()) + + test('progress calculation', () => { + const eta = new Eta({ start: true, total: 100 * 1024 * 1024, cutoffTime: 2.5 }) + expect(eta.progress).toBe(0) + + // First upload some parts with about 5MiB/s which should take 3s 
(total 20s) + for (let i = 1; i <= 6; i++) { + vi.advanceTimersByTime(500) + eta.add(2.5 * 1024 * 1024) + expect(eta.progress).toBe(i * 2.5) + expect(eta.speed).toBe(-1) + expect(eta.time).toBe(Infinity) + } + + // this is reached after (virtual) 3s with 6 * 2.5MiB (=15MiB) data of 100MiB total + expect(eta.time).toBe(Infinity) + + // Adding another 500ms with 5MiB/s will result in enough information for estimating + vi.advanceTimersByTime(500) + eta.add(2.5 * 1024 * 1024) + expect(eta.progress).toBe(17.5) + expect(eta.speed).toMatchInlineSnapshot('4826778') + expect(eta.speedReadable).toMatchInlineSnapshot('"4.6 MB∕s"') + expect(eta.time).toMatchInlineSnapshot('18') + + // Skip forward another 4.5 seconds + for (let i = 0; i < 9; i++) { + vi.advanceTimersByTime(500) + eta.add(2.5 * 1024 * 1024) + } + // See we made some progress + expect(eta.progress).toBe(40) + // See as we have constant speed, the speed is closing to 5MiB/s (5242880) + expect(eta.speed).toMatchInlineSnapshot('5060836') + expect(eta.speedReadable).toMatchInlineSnapshot('"4.8 MB∕s"') + expect(eta.time).toMatchInlineSnapshot('12') + + // Having a spike of 10MiB/s will not result in halving the eta + vi.advanceTimersByTime(500) + eta.add(5 * 1024 * 1024) + expect(eta.progress).toBe(45) + // See the value is not doubled + expect(eta.speed).toMatchInlineSnapshot('5208613') + expect(eta.speedReadable).toMatchInlineSnapshot('"5 MB∕s"') + // And the time has not halved + expect(eta.time).toMatchInlineSnapshot('11') + + // Add another 3 seconds so we should see 'few seconds left' + for (let i = 0; i < 6; i++) { + vi.advanceTimersByTime(500) + eta.add(2.5 * 1024 * 1024) + } + expect(eta.progress).toBe(60) + expect(eta.speed).toMatchInlineSnapshot('5344192') + expect(eta.time).toMatchInlineSnapshot('8') + }) + + test('long running progress', () => { + const eta = new Eta({ start: true, total: 100 * 1024 * 1024, cutoffTime: 2.5 }) + expect(eta.progress).toBe(0) + + // First upload some parts with about 1MiB/s 
+ for (let i = 1; i <= 6; i++) { + vi.advanceTimersByTime(500) + eta.add(512 * 1024) + expect(eta.progress).toBe(i / 2) + expect(eta.speed).toBe(-1) + expect(eta.time).toBe(Infinity) + } + + // Now we should be able to see some progress + vi.advanceTimersByTime(500) + eta.add(512 * 1024) + expect(eta.progress).toBe(3.5) + expect(eta.time).toBe(105) + + // Add another minute and we should see only seconds: + for (let i = 0; i < 120; i++) { + vi.advanceTimersByTime(500) + eta.add(512 * 1024) + expect(eta.progress).toBe(4 + 0.5 * i) + } + + // Now we have uploaded 63.5 MiB - so 36.5 MiB missing by having 1MiB/s upload speed we expect 37 seconds left: + expect(eta.progress).toBe(63.5) + expect(eta.time).toBe(37) + }) + + test('progress calculation for fast uploads', () => { + const eta = new Eta({ start: true, total: 100 * 1024 * 1024, cutoffTime: 2.5 }) + expect(eta.progress).toBe(0) + + // we have 100 MiB - when uploading with 40 MiB/s the time will be just like 2.5 seconds + // so not enough for estimation, instead we use the current speed to at least show that it is very fast + + // First chunk will not show any information as we initialize the system + vi.advanceTimersByTime(500) + eta.add(20 * 1024 * 1024) + expect(eta.progress).toBe(20) + expect(eta.speed).toBe(-1) + expect(eta.time).toBe(Infinity) + + // Now we have some information but not enough for normal estimation + // yet we show some information as the upload is very fast (40% per second) + vi.advanceTimersByTime(500) + eta.add(20 * 1024 * 1024) + expect(eta.progress).toBe(40) + expect(eta.time).toBe(1.5) + // still no speed information + expect(eta.speed).toBe(-1) + + // same check for the last 60MiB + for (let i = 1; i <= 3; i++) { + vi.advanceTimersByTime(500) + eta.add(20 * 1024 * 1024) + expect(eta.progress).toBe(40 + i * 20) + expect(eta.time).toBe(1.5 - (i / 2)) + // still no speed information + expect(eta.speed).toBe(-1) + } + expect(eta.progress).toBe(100) + }) + + it('can autostart in 
constructor', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + expect(eta.progress).toBe(0) + expect(eta.time).toBe(Infinity) + expect(eta.speed).toBe(-1) + }) + + it('can reset', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + + eta.add(10) + expect(eta.progress).toBe(10) + + eta.reset() + expect(eta.status).toBe(EtaStatus.Idle) + expect(eta.progress).toBe(0) + }) + + it('does not update when idle', () => { + const eta = new Eta() + expect(eta.progress).toBe(0) + + eta.update(10, 100) + expect(eta.progress).toBe(0) + + eta.add(10) + expect(eta.progress).toBe(0) + expect(eta.status).toBe(EtaStatus.Idle) + }) + + it('does not update when paused', () => { + const eta = new Eta({ start: true, total: 100 }) + eta.add(10) + expect(eta.progress).toBe(10) + + eta.pause() + eta.add(10) + expect(eta.progress).toBe(10) + expect(eta.status).toBe(EtaStatus.Paused) + }) + + it('can resume', () => { + const eta = new Eta() + expect(eta.status).toBe(EtaStatus.Idle) + eta.resume() + expect(eta.status).toBe(EtaStatus.Running) + }) +}) + +describe('ETA - events', () => { + it('emits updated event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('update', spy) + + // only works when running so nothing should happen + eta.update(10, 100) + expect(spy).not.toBeCalled() + + // now start and update + eta.resume() + eta.update(10, 100) + expect(spy).toBeCalledTimes(1) + }) + + it('emits reset event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('reset', spy) + + eta.reset() + expect(spy).toBeCalledTimes(1) + }) + + it('emits pause event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('pause', spy) + + // cannot pause if not running + eta.pause() + expect(spy).toBeCalledTimes(0) + + // start + eta.resume() + expect(spy).toBeCalledTimes(0) + + // Pause - this time the event should be 
emitted + eta.pause() + expect(spy).toBeCalledTimes(1) + // double pause does nothing + eta.pause() + expect(spy).toBeCalledTimes(1) + }) + + it('emits resume event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('resume', spy) + + eta.resume() + expect(spy).toBeCalledTimes(1) + // already resumed so nothing happens + eta.resume() + expect(spy).toBeCalledTimes(1) + }) +}) diff --git a/lib/upload/uploader/Eta.ts b/lib/upload/uploader/Eta.ts new file mode 100644 index 00000000..b80552f7 --- /dev/null +++ b/lib/upload/uploader/Eta.ts @@ -0,0 +1,208 @@ +/*! + * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { TypedEventTarget } from 'typescript-event-target' +import { formatFileSize } from '../../utils/fileSize.ts' + +export const EtaStatus = Object.freeze({ + Idle: 0, + Paused: 1, + Running: 2, +}) + +type TEtaStatus = typeof EtaStatus[keyof typeof EtaStatus] + +interface EtaOptions { + /** Low pass filter cutoff time for smoothing the speed */ + cutoffTime?: number + /** Total number of bytes to be expected */ + total?: number + /** Start the estimation directly */ + start?: boolean +} + +export interface EtaEventsMap { + pause: CustomEvent + reset: CustomEvent + resume: CustomEvent + update: CustomEvent +} + +export class Eta extends TypedEventTarget { + /** Bytes done */ + private _done: number = 0 + /** Total bytes to do */ + private _total: number = 0 + /** Current progress (cached) as interval [0,1] */ + private _progress: number = 0 + /** Status of the ETA */ + private _status: TEtaStatus = EtaStatus.Idle + /** Time of the last update */ + private _startTime: number = -1 + /** Total elapsed time for current ETA */ + private _elapsedTime: number = 0 + /** Current speed in bytes per second */ + private _speed: number = -1 + /** Expected duration to finish in seconds */ + private _eta: number = Infinity + + /** + * Cutoff time for the low pass 
filter of the ETA. + * A higher value will consider more history information for calculation, + * and thus suppress spikes of the speed, + * but will make the overall responsiveness slower. + */ + private _cutoffTime = 2.5 + + public constructor(options: EtaOptions = {}) { + super() + if (options.start) { + this.resume() + } + if (options.total) { + this.update(0, options.total) + } + this._cutoffTime = options.cutoffTime ?? 2.5 + } + + /** + * Add more transferred bytes. + * + * @param done Additional bytes done. + */ + public add(done: number): void { + this.update(this._done + done) + } + + /** + * Update the transmission state. + * + * @param done The new value of transferred bytes. + * @param total Optionally also update the total bytes we expect. + */ + public update(done: number, total?: number): void { + if (this.status !== EtaStatus.Running) { + return + } + if (total && total > 0) { + this._total = total + } + + const deltaDone = done - this._done + const deltaTime = (Date.now() - this._startTime) / 1000 + + this._startTime = Date.now() + this._elapsedTime += deltaTime + this._done = done + this._progress = this._done / this._total + + // Only update speed when the history is large enough so we can estimate it + const historyNeeded = this._cutoffTime + deltaTime + if (this._elapsedTime > historyNeeded) { + // Filter the done bytes using a low pass filter to suppress speed spikes + const alpha = deltaTime / (deltaTime + (1 / this._cutoffTime)) + const filtered = (this._done - deltaDone) + (1 - alpha) * deltaDone + // bytes per second - filtered + this._speed = Math.round(filtered / this._elapsedTime) + } else if (this._speed === -1 && this._elapsedTime > deltaTime) { + // special case when uploading with high speed + // it could be that the upload is finished before we reach the cutoff time + // so we already give an estimation + const remaining = this._total - done + const eta = remaining / (done / this._elapsedTime) + // Only set the ETA when we either 
already set it for a previous update + // or when the special case happened that we are in fast upload and we only got a couple of seconds for the whole upload + // meaning we are below 2x the cutoff time. + if (this._eta !== Infinity || eta <= 2 * this._cutoffTime) { + // We only take a couple of seconds so we set the eta to the current ETA using current speed. + // But we do not set the speed because we do not want to trigger the real ETA calculation below + // and especially because the speed would be very spiky (we still have no filters in place). + this._eta = eta + } + } + + // Update the eta if we have valid speed information (prevent divide by zero) + if (this._speed > 0) { + // Estimate transfer of remaining bytes with current average speed + this._eta = Math.round((this._total - this._done) / this._speed) + } + + this.dispatchTypedEvent('update', new CustomEvent('update', { cancelable: false })) + } + + public reset(): void { + this._done = 0 + this._total = 0 + this._progress = 0 + this._elapsedTime = 0 + this._eta = Infinity + this._speed = -1 + this._startTime = -1 + this._status = EtaStatus.Idle + this.dispatchTypedEvent('reset', new CustomEvent('reset')) + } + + /** + * Pause the ETA calculation. + */ + public pause(): void { + if (this._status === EtaStatus.Running) { + this._status = EtaStatus.Paused + this._elapsedTime += (Date.now() - this._startTime) / 1000 + this.dispatchTypedEvent('pause', new CustomEvent('pause')) + } + } + + /** + * Resume the ETA calculation. + */ + public resume(): void { + if (this._status !== EtaStatus.Running) { + this._startTime = Date.now() + this._status = EtaStatus.Running + this.dispatchTypedEvent('resume', new CustomEvent('resume')) + } + } + + /** + * Status of the Eta (paused, active, idle). 
+ */ + public get status(): TEtaStatus { + return this._status + } + + /** + * Progress (percent done) + */ + public get progress(): number { + return Math.round(this._progress * 10000) / 100 + } + + /** + * Estimated time in seconds. + * If the time is not yet estimated, it will return `Infinity`. + */ + public get time(): number { + return this._eta + } + + /** + * Transfer speed in bytes per second. + * Returns `-1` if not yet estimated. + */ + public get speed(): number { + return this._speed + } + + /** + * Get the speed in human readable format using file sizes like 10KB/s. + * Returns the empty string if not yet estimated. + */ + public get speedReadable(): string { + return this._speed > 0 + ? `${formatFileSize(this._speed, true)}∕s` + : '' + } +} diff --git a/lib/upload/uploader/Upload.ts b/lib/upload/uploader/Upload.ts new file mode 100644 index 00000000..7d3ef607 --- /dev/null +++ b/lib/upload/uploader/Upload.ts @@ -0,0 +1,106 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type PQueue from 'p-queue' + +import { TypedEventTarget } from 'typescript-event-target' + +export const UploadStatus = Object.freeze({ + /** The upload was initialized */ + INITIALIZED: 0, + /** The upload was scheduled but is not yet uploading */ + SCHEDULED: 1, + /** The upload itself is running */ + UPLOADING: 2, + /** Chunks are being assembled */ + ASSEMBLING: 3, + /** The upload finished successfully */ + FINISHED: 4, + /** The upload was cancelled by the user */ + CANCELLED: 5, + /** The upload failed */ + FAILED: 6, +}) + +export type TUploadStatus = typeof UploadStatus[keyof typeof UploadStatus] + +interface UploadEvents { + finished: CustomEvent + progress: CustomEvent +} + +export interface IUpload extends TypedEventTarget { + /** + * The source of the upload + */ + readonly source: string + /** + * Whether the upload is chunked or not + */ + readonly isChunked: boolean + /** + 
* The total size of the upload in bytes + */ + readonly totalBytes: number + /** + * Timestamp of when the upload started. + * Will return `undefined` if the upload has not started yet. + */ + readonly startTime?: number + /** + * The number of bytes that have been uploaded so far + */ + readonly uploadedBytes: number + /** + * The current status of the upload + */ + readonly status: TUploadStatus + /** + * The internal abort signal + */ + readonly signal: AbortSignal + + /** + * The children of this upload. + * - For a file upload, this will be an empty array. + * - For a folder upload, this will be the uploads of the children files and folders. + */ + readonly children: IUpload[] + + /** + * Cancels the upload + */ + cancel(): void +} + +export abstract class Upload extends TypedEventTarget implements Partial { + #abortController = new AbortController() + + get signal(): AbortSignal { + return this.#abortController.signal + } + + /** + * Cancels the upload + */ + public cancel(): void { + this.#abortController.abort() + } + + /** + * Get the children of this upload. + * For a file upload, this will be an empty array, for a folder upload, this will be the uploads of the children files and folders. + */ + public get children(): IUpload[] { + return [] + } + + /** + * Start the upload + * + * @param queue - The job queue. It is used to limit the number of concurrent upload jobs. + */ + public abstract start(queue: PQueue): Promise +} diff --git a/lib/upload/uploader/UploadFile.spec.ts b/lib/upload/uploader/UploadFile.spec.ts new file mode 100644 index 00000000..8eae4c20 --- /dev/null +++ b/lib/upload/uploader/UploadFile.spec.ts @@ -0,0 +1,198 @@ +/* eslint-disable @typescript-eslint/no-unused-vars */ +/*! 
+ * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { CanceledError } from 'axios' +import { describe, expect, it, vi } from 'vitest' +import { UploadStatus } from './Upload.ts' +import { UploadFile } from './UploadFile.ts' + +const isPublicShareMock = vi.hoisted(() => vi.fn()) +vi.mock('@nextcloud/sharing/public', () => ({ isPublicShare: isPublicShareMock })) + +const initChunkWorkspaceMock = vi.hoisted(() => vi.fn()) +const uploadDataMock = vi.hoisted(() => vi.fn()) +vi.mock('../utils/upload.ts', async () => ({ + ...(await vi.importActual('../utils/upload.ts')), + initChunkWorkspace: initChunkWorkspaceMock, + uploadData: uploadDataMock, +})) + +const getMaxChunksSizeMock = vi.hoisted(() => vi.fn()) +const supportsPublicChunkingMock = vi.hoisted(() => vi.fn()) +vi.mock('../utils/config.ts', () => ({ + getMaxChunksSize: getMaxChunksSizeMock, + supportsPublicChunking: supportsPublicChunkingMock, +})) + +describe('chunking', () => { + it('enables chunking for non-public shares', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(true) + }) + + it('enables chunking for public shares', () => { + isPublicShareMock.mockReturnValue(true) + supportsPublicChunkingMock.mockReturnValue(true) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(true) + }) + + it('disables chunking if too small', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1000)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(false) + }) + + 
it('disables chunking if explicitly disabled', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: true }) + expect(uploadFile.isChunked).toBe(false) + }) + + it('disables chunking if disabled by admin', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(0) + const uploadFile = new UploadFile('/destination', new File([], 'filename'), { noChunking: true }) + expect(uploadFile.isChunked).toBe(false) + }) + + it('disables chunking if not supported by public shares', () => { + isPublicShareMock.mockReturnValue(true) + supportsPublicChunkingMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(false) + }) + + it.each([ + [0, 1], + [1024, 1], + [1025, 2], + [2048, 2], + [2049, 3], + ])('calculates number of chunks correctly for file size %i', async (fileSize, expectedChunks) => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(fileSize)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(expectedChunks > 1) + + const { resolve, promise } = Promise.withResolvers() + const queue = { add: vi.fn(() => resolve()) } + uploadFile.start(queue as never) + + // wait for queue to be called + await promise + expect(uploadFile.numberOfChunks).toBe(expectedChunks) + }) +}) + +describe('upload status and events', () => { + it('initialized', () => { + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.status).toBe(UploadStatus.INITIALIZED) + }) + + it('scheduled', async () => { + 
isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + const { resolve, promise } = Promise.withResolvers() + const queue = { add: vi.fn(() => resolve()) } + + uploadFile.start(queue as never) + // wait for queue to be called + await promise + expect(uploadFile.status).toBe(UploadStatus.SCHEDULED) + }) + + it('uploading', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + + const { promise: uploadDataPromise } = Promise.withResolvers() + uploadDataMock.mockImplementationOnce(() => uploadDataPromise) + + const { promise: queuePromise, resolve: queueResolve } = Promise.withResolvers() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + const queue = { add: vi.fn((fn: () => Promise) => (queueResolve(), fn())) } + // start upload and wait for queue to be called + uploadFile.start(queue as never) + await queuePromise + + expect(uploadFile.status).toBe(UploadStatus.UPLOADING) + }) + + it('finished', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.resolve()) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]() + expect(uploadFile.status).toBe(UploadStatus.FINISHED) + expect(onFinish).toHaveBeenCalledOnce() + }) + + it('cancelled by DOM', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.reject(new 
DOMException('Aborted', 'AbortError'))) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]().catch(() => {}) + expect(uploadFile.status).toBe(UploadStatus.CANCELLED) + expect(onFinish).toHaveBeenCalledOnce() + }) + + it('cancelled by axios', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.reject(new CanceledError())) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]().catch(() => {}) + expect(uploadFile.status).toBe(UploadStatus.CANCELLED) + expect(onFinish).toHaveBeenCalledOnce() + }) + + it('failed', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.reject(new Error('generic error'))) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]().catch(() => {}) + expect(uploadFile.status).toBe(UploadStatus.FAILED) + expect(onFinish).toHaveBeenCalledOnce() + }) +}) diff --git a/lib/upload/uploader/UploadFile.ts b/lib/upload/uploader/UploadFile.ts new file mode 100644 index 00000000..3984f30f --- /dev/null +++ 
b/lib/upload/uploader/UploadFile.ts @@ -0,0 +1,214 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type PQueue from 'p-queue' +import type { IUpload, TUploadStatus } from './Upload.ts' + +import axios from '@nextcloud/axios' +import { join } from '@nextcloud/paths' +import { isPublicShare } from '@nextcloud/sharing/public' +import { UploadCancelledError } from '../errors/UploadCancelledError.ts' +import { UploadFailedError } from '../errors/UploadFailedError.ts' +import { getMaxChunksSize, supportsPublicChunking } from '../utils/config.ts' +import { getMtimeHeader, isRequestAborted } from '../utils/requests.ts' +import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' +import { Upload, UploadStatus } from './Upload.ts' + +/** + * A class representing a single file to be uploaded + */ +export class UploadFile extends Upload implements IUpload { + #customHeaders: Record + #fileHandle: File | FileSystemFileEntry + #file?: File + #noChunking: boolean + + public source: string + public status: TUploadStatus = UploadStatus.INITIALIZED + public startTime?: number + public totalBytes: number = 0 + public uploadedBytes: number = -1 + public numberOfChunks: number = 1 + + constructor( + destination: string, + fileHandle: File | FileSystemFileEntry, + options: { headers?: Record, noChunking?: boolean }, + ) { + super() + const { + headers = {}, + noChunking = false, + } = options + + // exposed state + this.source = destination + this.totalBytes = 'size' in fileHandle ? fileHandle.size : -1 + + // private state + this.#fileHandle = fileHandle + this.#customHeaders = headers + this.#noChunking = noChunking + this.signal.addEventListener('abort', () => { + if (this.status !== UploadStatus.FAILED) { + this.status = UploadStatus.CANCELLED + } + }) + } + + get isChunked(): boolean { + const maxChunkSize = getMaxChunksSize('size' in this.#fileHandle ? 
this.#fileHandle.size : undefined) + return !this.#noChunking + && maxChunkSize > 0 + && this.totalBytes > maxChunkSize + && (!isPublicShare() || supportsPublicChunking()) + } + + async start(queue: PQueue): Promise { + if (this.status !== UploadStatus.INITIALIZED) { + throw new Error('Upload already started') + } + + this.startTime = Date.now() + this.#file = await getFile(this.#fileHandle) + this.totalBytes = this.#file.size + this.uploadedBytes = 0 + this.status = UploadStatus.SCHEDULED + + try { + if (this.isChunked) { + this.numberOfChunks = Math.ceil(this.totalBytes / getMaxChunksSize(this.totalBytes)) + await this.#uploadChunked(queue) + } else { + queue.add(() => this.#upload()) + } + } catch (error) { + this.cancel() + if (error instanceof UploadCancelledError || error instanceof UploadFailedError) { + throw error + } + this.status = UploadStatus.FAILED + throw new UploadFailedError(error) + } + } + + /** + * Internal implementation of the upload process for non-chunked uploads. + */ + async #upload() { + this.status = UploadStatus.UPLOADING + const chunk = await getChunk(this.#file!, 0, this.#file!.size) + try { + await this.#uploadChunk(chunk, this.source) + } finally { + this.dispatchTypedEvent('finished', new CustomEvent('finished', { detail: this })) + } + } + + /** + * Internal implementation of the upload process for chunked uploads. 
+ * + * @param queue - The job queue to throttle number of concurrent chunk uploads + */ + async #uploadChunked(queue: PQueue) { + this.status = UploadStatus.UPLOADING + const temporaryUrl = await initChunkWorkspace(this.source, 5, isPublicShare(), this.#customHeaders) + + const promises: Promise[] = [] + for (let i = 0; i < this.numberOfChunks; i++) { + const chunk = await getChunk(this.#file!, i * getMaxChunksSize(this.totalBytes), (i + 1) * getMaxChunksSize(this.totalBytes)) + promises.push(queue.add(() => this.#uploadChunk(chunk, join(temporaryUrl, String(i))))) + } + this.status = UploadStatus.UPLOADING + + queue.add(async () => { + try { + await Promise.all(promises) + // Send the assemble request + this.status = UploadStatus.ASSEMBLING + await queue.add(async () => { + await axios.request({ + method: 'MOVE', + url: `${temporaryUrl}/.file`, + headers: { + ...this.#customHeaders, + ...getMtimeHeader(this.#file!), + 'OC-Total-Length': this.#file!.size, + Destination: this.source, + }, + }) + }) + this.status = UploadStatus.FINISHED + } catch (error) { + this.cancel() + if (isRequestAborted(error)) { + this.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } + this.status = UploadStatus.FAILED + throw new UploadFailedError(error) + } finally { + this.dispatchTypedEvent('finished', new CustomEvent('finished', { detail: this })) + } + }) + } + + /** + * Internal helper to share logic for uploading a chunk of data for both chunked and non-chunked uploads. + * + * @param chunk - The chunk to upload + * @param url - The target URL + */ + async #uploadChunk(chunk: Blob, url: string) { + try { + await uploadData( + url, + chunk, + { + signal: this.signal, + onUploadProgress: ({ bytes }) => { + // As this is only the sent bytes not the processed ones we only count 90%. + // When the upload is finished (server acknowledged the upload) the remaining 10% will be correctly set. 
+ this.uploadedBytes += bytes * 0.9 + this.dispatchTypedEvent('progress', new CustomEvent('progress', { detail: this })) + }, + onUploadRetry: () => { + this.uploadedBytes = 0 + }, + headers: { + ...this.#customHeaders, + ...getMtimeHeader(this.#file!), + 'Content-Type': this.#file!.type, + }, + }, + ) + + // Update progress - now we set the uploaded size to 100% of the file size + this.uploadedBytes = this.totalBytes + this.status = UploadStatus.FINISHED + } catch (error) { + if (isRequestAborted(error)) { + this.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } + + this.status = UploadStatus.FAILED + throw new UploadFailedError(error) + } + } +} + +/** + * Converts a FileSystemFileEntry to a File if needed and returns it. + * + * @param fileHandle - The file handle + */ +async function getFile(fileHandle: File | FileSystemFileEntry): Promise { + if (fileHandle instanceof File) { + return fileHandle + } + + return await new Promise((resolve, reject) => fileHandle.file(resolve, reject)) +} diff --git a/lib/upload/uploader/UploadFileTree.ts b/lib/upload/uploader/UploadFileTree.ts new file mode 100644 index 00000000..1c34c622 --- /dev/null +++ b/lib/upload/uploader/UploadFileTree.ts @@ -0,0 +1,212 @@ +/*! 
+ * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type PQueue from 'p-queue' +import type { IUpload, TUploadStatus } from './Upload.ts' + +import axios, { isAxiosError } from '@nextcloud/axios' +import { basename, join } from '@nextcloud/paths' +import { UploadCancelledError } from '../errors/UploadCancelledError.ts' +import { UploadFailedError } from '../errors/UploadFailedError.ts' +import { Directory as FileTree } from '../utils/fileTree.ts' +import { getMtimeHeader, isRequestAborted } from '../utils/requests.ts' +import { Upload, UploadStatus } from './Upload.ts' +import { UploadFile } from './UploadFile.ts' + +/** + * Callback type for conflict resolution when uploading a folder tree. + * + * The callback receives the nodes in the current folder and the current path to upload to, + * it should return a list of nodes that should be uploaded (e.g. after resolving conflicts by renaming or selecting which files to upload). + * In case the upload should be cancelled, it should return `false`. + * The returned mapping allows resolving conflicts by renaming files or folders before upload, + * the key is the original name of the node and the value is the name to upload it as.
+ * + * @param nodes - The nodes to upload (list of filenames) + * @param currentPath - The current path to upload to + * @return A promise that resolves to a list of nodes that should be uploaded or false if the upload should be cancelled + */ +export type ConflictsCallback = (nodes: string[], currentPath: string) => Promise> + +/** + * A class representing a folder tree to be uploaded + */ +export class UploadFileTree extends Upload implements IUpload { + /** Custom headers passed */ + #customHeaders: Record + /** The current file tree to upload */ + #directory: FileTree + /** Whether chunking is disabled */ + #noChunking: boolean + /** Children uploads of this parent folder upload */ + #children: (Upload & IUpload)[] = [] + /** The callback to handle conflicts */ + #conflictsCallback?: ConflictsCallback + + /** Whether we need to check for conflicts or not (newly created parent folders = no conflict resolution needed) */ + protected needConflictResolution = true + + public source: string + public status: TUploadStatus = UploadStatus.INITIALIZED + public startTime?: number + public totalBytes: number = 0 + public uploadedBytes: number = -1 + + constructor( + destination: string, + directory: FileTree, + options: { + callback?: ConflictsCallback + headers?: Record + noChunking?: boolean + }, + ) { + super() + const { + headers = {}, + noChunking = false, + } = options + + // exposed state + this.source = destination + this.#directory = directory + this.#customHeaders = headers + this.#noChunking = noChunking + + this.signal.addEventListener('abort', () => { + for (const child of this.#children) { + child.cancel() + } + if (this.status !== UploadStatus.FAILED) { + this.status = UploadStatus.CANCELLED + } + }) + } + + get isChunked(): boolean { + return false + } + + get children(): IUpload[] { + return [...this.#children] + } + + /** + * Set up all child uploads for this upload tree.
+ */ + initialize(): (Upload & IUpload)[] { + const grandchildren: (Upload & IUpload)[] = [] + for (const child of this.#directory.children) { + if (child instanceof FileTree) { + const upload = new UploadFileTree( + join(this.source, child.originalName), + child, + { + callback: this.#conflictsCallback, + headers: this.#customHeaders, + noChunking: this.#noChunking, + }, + ) + this.#children.push(upload) + grandchildren.push(...upload.initialize()) + } else { + const upload = new UploadFile( + join(this.source, child.name), + child, + { headers: this.#customHeaders, noChunking: this.#noChunking }, + ) + this.#children.push(upload) + } + } + return [...this.#children, ...grandchildren] + } + + async start(queue: PQueue): Promise { + if (this.status !== UploadStatus.INITIALIZED) { + throw new Error('Upload already started') + } + this.status = UploadStatus.SCHEDULED + this.startTime = Date.now() + this.uploadedBytes = 0 + + this.status = UploadStatus.UPLOADING + await this.#createDirectory(queue) + if (this.needConflictResolution && this.#conflictsCallback) { + const nodes = await this.#conflictsCallback( + this.#directory.children.map((node) => basename(node.name)), + this.source, + ) + if (nodes === false) { + this.cancel() + return + } + + for (const [originalName, newName] of Object.entries(nodes)) { + const upload = this.#children.find((child) => basename(child.source) === originalName) + if (upload) { + Object.defineProperty(upload, 'source', { value: join(this.source, newName) }) + } + } + } + + const uploads: Promise[] = [] + for (const upload of this.#children) { + uploads.push(upload.start(queue)) + // for folder tree uploads store the conflict resolution state to prevent useless requests + if (upload instanceof UploadFileTree) { + upload.needConflictResolution = this.needConflictResolution + } + } + + try { + await Promise.all(uploads) + this.status = UploadStatus.FINISHED + } catch (error) { + this.cancel() + if (isRequestAborted(error)) { + this.status 
= UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } else if (error instanceof UploadCancelledError) { + this.status = UploadStatus.CANCELLED + throw error + } else if (error instanceof UploadFailedError) { + this.status = UploadStatus.FAILED + throw error + } + } finally { + this.dispatchTypedEvent('finished', new CustomEvent('finished', { detail: this })) + } + } + + /** + * Helper to create the directory for this tree. + * + * @param queue - The job queue + */ + async #createDirectory(queue: PQueue): Promise { + await queue.add(async () => { + try { + await axios.request({ + method: 'MKCOL', + url: this.source, + headers: { + ...this.#customHeaders, + ...getMtimeHeader(this.#directory), + }, + signal: this.signal, + }) + // MKCOL worked so this is a new directory, no conflict resolution needed + this.needConflictResolution = false + } catch (error) { + // ignore 405 Method Not Allowed as it means the directory already exists and we can continue with uploading the children + if (isAxiosError(error) && error.response?.status === 405) { + this.needConflictResolution = true + return + } + throw error + } + }) + } +} diff --git a/lib/upload/uploader/Uploader.ts b/lib/upload/uploader/Uploader.ts new file mode 100644 index 00000000..e63959fb --- /dev/null +++ b/lib/upload/uploader/Uploader.ts @@ -0,0 +1,359 @@ +/*! 
+ * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { IFolder } from '../../node/folder.ts' +import type { IUpload } from './Upload.ts' +import type { ConflictsCallback } from './UploadFileTree.ts' + +import { getCurrentUser } from '@nextcloud/auth' +import PQueue from 'p-queue' +import { TypedEventTarget } from 'typescript-event-target' +import { defaultRemoteURL, defaultRootPath } from '../../dav/dav.ts' +import { FileType, Folder } from '../../node/index.ts' +import { Permission } from '../../permissions.ts' +import logger from '../../utils/logger.ts' +import { getMaxChunksSize, getMaxParallelUploads } from '../utils/config.ts' +import { Directory } from '../utils/fileTree.ts' +import { Eta } from './Eta.ts' +import { UploadStatus } from './Upload.ts' +import { UploadFile } from './UploadFile.ts' +import { UploadFileTree } from './UploadFileTree.ts' + +export const UploaderStatus = Object.freeze({ + IDLE: 0, + UPLOADING: 1, + PAUSED: 2, +}) + +type TUploaderStatus = typeof UploaderStatus[keyof typeof UploaderStatus] + +interface BaseOptions { + /** + * Abort signal to cancel the upload + */ + signal?: AbortSignal +} + +interface UploadOptions extends BaseOptions { + /** + * The root folder where to upload. + * Allows to override the current root of the uploader for this upload + */ + root?: string + + /** + * Number of retries for the upload + * + * @default 5 + */ + retries?: number +} + +interface BatchUploadOptions extends UploadOptions { + callback?: ConflictsCallback +} + +interface UploaderEventsMap { + /** + * Dispatched when the uploader is paused + */ + paused: CustomEvent + /** + * Dispatched when the uploader is resumed + */ + resumed: CustomEvent + /** + * Dispatched when the uploader has finished all uploads (successfully, failed or cancelled) + */ + finished: CustomEvent + + /** + * Dispatched when a new upload has been started. 
+ */ + uploadStarted: CustomEvent + /** + * Dispatched when an upload has made progress (e.g. a chunk has been uploaded). + */ + uploadProgress: CustomEvent + /** + * Dispatched when an upload has finished (successfully, failed or cancelled). + */ + uploadFinished: CustomEvent +} + +export class Uploader extends TypedEventTarget { + #eta = new Eta() + #destinationFolder: IFolder + #customHeaders: Map = new Map() + #status: TUploaderStatus = UploaderStatus.IDLE + #uploadQueue: IUpload[] = [] + #jobQueue: PQueue = new PQueue({ + concurrency: getMaxParallelUploads(), + }) + + /** + * Initialize uploader + * + * @param isPublic are we in public mode ? + * @param destinationFolder the context folder to operate, relative to the root folder + */ + constructor( + isPublic = false, + destinationFolder?: IFolder, + ) { + super() + if (!destinationFolder) { + const source = `${defaultRemoteURL}${defaultRootPath}` + let owner: string + + if (isPublic) { + owner = 'anonymous' + } else { + const user = getCurrentUser()?.uid + if (!user) { + throw new Error('User is not logged in') + } + owner = user + } + + destinationFolder = new Folder({ + id: 0, + owner, + permissions: Permission.ALL, + root: defaultRootPath, + source, + }) + } + this.#destinationFolder = destinationFolder + + logger.debug('Upload workspace initialized', { + root: this.#destinationFolder.source, + isPublic, + maxChunksSize: getMaxChunksSize(), + }) + } + + public get status(): TUploaderStatus { + return this.#status + } + + /** + * Get the upload destination folder + */ + public get destination(): IFolder { + return this.#destinationFolder + } + + /** + * Set the upload destination path relative to the root folder + */ + public set destination(folder: IFolder) { + if (!folder || folder.type !== FileType.Folder || !folder.source) { + throw new Error('Invalid destination folder') + } + + logger.debug('Destination set', { folder }) + this.#destinationFolder = folder + } + + /** + * Get registered custom headers 
for uploads + */ + public get customHeaders(): Map { + return structuredClone(this.#customHeaders) + } + + /** + * Set a custom header + * + * @param name The header to set + * @param value The string value + */ + public setCustomHeader(name: string, value: string = ''): void { + this.#customHeaders.set(name, value) + } + + /** + * Unset a custom header + * + * @param name The header to unset + */ + public deleteCustomerHeader(name: string): void { + this.#customHeaders.delete(name) + } + + /** + * Get the upload queue + */ + public get queue(): IUpload[] { + return [...this.#uploadQueue] + } + + /** + * Pause the uploader. + * Already started uploads will continue, but all other (not yet started) uploads + * will be paused until `start()` is called. + */ + public async pause() { + this.#jobQueue.pause() + this.#status = UploaderStatus.PAUSED + await this.#jobQueue.onPendingZero() + this.dispatchTypedEvent('paused', new CustomEvent('paused')) + logger.debug('Uploader paused') + } + + /** + * Resume any pending upload(s) + */ + public start() { + this.#jobQueue.start() + this.#status = UploaderStatus.UPLOADING + this.dispatchTypedEvent('resumed', new CustomEvent('resumed')) + logger.debug('Uploader resumed') + } + + public reset() { + for (const upload of this.#uploadQueue) { + upload.cancel() + } + + this.#uploadQueue = [] + this.#jobQueue.clear() + this.#eta.reset() + this.#status = UploaderStatus.IDLE + logger.debug('Uploader reset') + } + + /** + * Uploads multiple files or folders while preserving the relative path (if available) + * + * @param destination The destination path relative to the root folder. e.g. 
/foo/bar (a file "a.txt" will be uploaded then to "/foo/bar/a.txt") + * @param files The files and/or folders to upload + * @param options - optional parameters + * @param options.callback Callback that receives the nodes in the current folder and the current path to allow resolving conflicts, all nodes that are returned will be uploaded (if a folder does not exist it will be created) + * @throws {UploadCancelledError} - If the upload was canceled by the user via the abort signal + * + * @example + * ```ts + * // For example this is from handling the onchange event of an input[type=file] + * async handleFiles(files: File[]) { + * this.uploads = await this.uploader.batchUpload('uploads', files, { callback: this.handleConflicts }) + * } + * + * async handleConflicts(nodes: File[], currentPath: string) { + * const conflicts = getConflicts(nodes, this.fetchContent(currentPath)) + * if (conflicts.length === 0) { + * // No conflicts so upload all + * return nodes + * } else { + * // Open the conflict picker to resolve conflicts + * try { + * const { selected, renamed } = await openConflictPicker(currentPath, conflicts, this.fetchContent(currentPath), { recursive: true }) + * return [...selected, ...renamed] + * } catch (e) { + * return false + * } + * } + * } + * ``` + */ + public async batchUpload( + destination: string, + files: (File | FileSystemEntry)[], + options?: BatchUploadOptions, + ): Promise { + const rootFolder = new Directory('') + await rootFolder.addChildren(files) + // create a meta upload to ensure all ongoing child requests are listed + const target = `${this.destination.source.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` + const headers = Object.fromEntries(this.#customHeaders.entries()) + const upload = new UploadFileTree( + target, + rootFolder, + { ...options, headers }, + ) + if (options?.signal) { + options.signal.addEventListener('abort', upload.cancel) + } + + const uploads = [...upload.initialize(), upload] + for (const upload of 
uploads) { + this.#attachEventListeners(upload) + } + this.#uploadQueue.push(...uploads) + this.dispatchTypedEvent('uploadStarted', new CustomEvent('uploadStarted', { detail: upload })) + await upload.start(this.#jobQueue) + return uploads + } + + /** + * Upload a file to the given path + * + * @param destination - The destination path relative to the root folder. e.g. /foo/bar.txt + * @param fileHandle - The file to upload + * @param options - Optional parameters + */ + public async upload(destination: string, fileHandle: File | FileSystemFileEntry, options?: UploadOptions): Promise { + const target = `${this.destination.source.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` + const headers = Object.fromEntries(this.#customHeaders.entries()) + const upload = new UploadFile(target, fileHandle, { ...options, headers }) + if (options?.signal) { + options.signal.addEventListener('abort', upload.cancel) + } + + this.#attachEventListeners(upload) + this.#uploadQueue.push(upload) + this.dispatchTypedEvent('uploadStarted', new CustomEvent('uploadStarted', { detail: upload })) + await upload.start(this.#jobQueue) + return upload + } + + /** + * Handle the progress event of an upload. + * Update the ETA and dispatch a progress event for the uploader. + * + * @param event - The progress event of an upload + */ + #onProgress(event: CustomEvent) { + const totalBytes = this.#uploadQueue.reduce((acc, upload) => acc + upload.totalBytes, 0) + const uploadedBytes = this.#uploadQueue.reduce((acc, upload) => acc + upload.uploadedBytes, 0) + this.#eta.update(uploadedBytes, totalBytes) + this.dispatchTypedEvent('uploadProgress', new CustomEvent('uploadProgress', { detail: event.detail })) + } + + /** + * Handle the finished event of an upload. + * + * 1. Update the progress + * 2. 
if all uploads are finished dispatch a finished event for the uploader and clear the queue + * + * @param event - The finished event of an upload + */ + async #onFinished(event: CustomEvent) { + this.#onProgress(event) + this.dispatchTypedEvent('uploadFinished', new CustomEvent('uploadFinished', { detail: event.detail })) + + const finalStates = [ + UploadStatus.FINISHED, + UploadStatus.CANCELLED, + UploadStatus.FAILED, + ] as number[] + if (this.#uploadQueue.every((upload) => finalStates.includes(upload.status))) { + await this.#jobQueue.onIdle() + this.dispatchTypedEvent('finished', new CustomEvent('finished')) + this.reset() + } + } + + /** + * Attach progress listeners to an upload. + * + * @param upload - The upload to attach listeners to + */ + #attachEventListeners(upload: IUpload) { + upload.addEventListener('progress', this.#onProgress) + upload.addEventListener('finished', this.#onFinished) + } +} diff --git a/lib/upload/uploader/index.ts b/lib/upload/uploader/index.ts new file mode 100644 index 00000000..3f4f94d2 --- /dev/null +++ b/lib/upload/uploader/index.ts @@ -0,0 +1,11 @@ +/*! + * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +export type { Eta } from './Eta.ts' +export type { IUpload } from './Upload.ts' + +export { EtaStatus } from './Eta.ts' +export { UploadStatus } from './Upload.ts' +export { Uploader, UploaderStatus } from './Uploader.ts' diff --git a/lib/upload/utils/config.ts b/lib/upload/utils/config.ts new file mode 100644 index 00000000..cddefe39 --- /dev/null +++ b/lib/upload/utils/config.ts @@ -0,0 +1,50 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { getCapabilities } from '@nextcloud/capabilities' + +/** + * Get the maximum number of parallel uploads based on the server configuration. 
+ */ +export function getMaxParallelUploads(): number { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const capabilities = getCapabilities() as Record + return capabilities.files?.chunked_upload?.max_parallel_count ?? 5 +} + +/** + * Checks if the server supports chunking for public shares. + */ +export function supportsPublicChunking(): boolean { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return (getCapabilities() as Record).dav?.public_shares_chunking ?? false +} + +/** + * Get the maximum chunk size for chunked uploads based on the server configuration and file size. + * + * @param fileSize - The size of the file to be uploaded. If not provided, the function will return the default chunk size. + */ +export function getMaxChunksSize(fileSize: number | undefined = undefined): number { + const maxChunkSize = window.OC?.appConfig?.files?.max_chunk_size + if (maxChunkSize <= 0) { + return 0 + } + + // If invalid return default + if (!Number(maxChunkSize)) { + return 10 * 1024 * 1024 + } + + // v2 of chunked upload requires chunks to be 5 MB at minimum + const minimumChunkSize = Math.max(Number(maxChunkSize), 5 * 1024 * 1024) + + if (fileSize === undefined) { + return minimumChunkSize + } + + // Adapt chunk size to fit the file in 10000 chunks for chunked upload v2 + return Math.max(minimumChunkSize, Math.ceil(fileSize / 10000)) +} diff --git a/lib/upload/utils/fileTree.ts b/lib/upload/utils/fileTree.ts new file mode 100644 index 00000000..b9cbbd65 --- /dev/null +++ b/lib/upload/utils/fileTree.ts @@ -0,0 +1,127 @@ +/*! + * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +/** + * Helpers to generate a file tree when the File and Directory API is used (e.g. 
Drag and Drop or ) + */ + +import { basename } from '@nextcloud/paths' +import { isFileSystemDirectoryEntry, isFileSystemFileEntry } from './filesystem.ts' + +/** + * This is a helper class to allow building a file tree for uploading + * It allows to create virtual directories + */ +export class Directory extends File { + private _originalName: string + private _path: string + private _children: Map + + constructor(path: string) { + super([], basename(path), { type: 'httpd/unix-directory', lastModified: 0 }) + this._children = new Map() + this._originalName = basename(path) + this._path = path + } + + get size(): number { + return this.children.reduce((sum, file) => sum + file.size, 0) + } + + get lastModified(): number { + return this.children.reduce((latest, file) => Math.max(latest, file.lastModified), 0) + } + + // We need this to keep track of renamed files + get originalName(): string { + return this._originalName + } + + get children(): Array { + return Array.from(this._children.values()) + } + + get webkitRelativePath(): string { + return this._path + } + + getChild(name: string): File | Directory | null { + return this._children.get(name) ?? null + } + + /** + * Add multiple children at once + * + * @param files The files to add + */ + async addChildren(files: Array): Promise { + for (const file of files) { + await this.addChild(file) + } + } + + /** + * Add a child to the directory. + * If it is a nested child the parents will be created if not already exist. 
+ * + * @param file The child to add + */ + async addChild(file: File | FileSystemEntry) { + const rootPath = this._path && `${this._path}/` + if (isFileSystemFileEntry(file)) { + file = await new Promise((resolve, reject) => (file as FileSystemFileEntry).file(resolve, reject)) + } else if (isFileSystemDirectoryEntry(file)) { + const reader = file.createReader() + const entries = await new Promise((resolve, reject) => reader.readEntries(resolve, reject)) + + // Create a new child directory and add the entries + const child = new Directory(`${rootPath}${file.name}`) + await child.addChildren(entries) + this._children.set(file.name, child) + return + } + + // Make Typescript calm - we ensured it is not a file system entry above. + file = file as File + + const filePath = file.webkitRelativePath ?? file.name + // Handle plain files + if (!filePath.includes('/')) { + // Direct child of the directory + this._children.set(file.name, file) + } else { + // Check if file is a child + if (!filePath.startsWith(this._path)) { + throw new Error(`File ${filePath} is not a child of ${this._path}`) + } + + // If file is a child check if we need to nest it + const relPath = filePath.slice(rootPath.length) + const name = basename(relPath) + + if (name === relPath) { + // It is a direct child so we can add it + this._children.set(name, file) + } else { + // It is not a direct child so we need to create intermediate nodes + const base = relPath.slice(0, relPath.indexOf('/')) + if (this._children.has(base)) { + // It is a grandchild so we can add it directly + await (this._children.get(base) as Directory).addChild(file) + } else { + // We do not know any parent of that child + // so we need to add a new child on the current level + const child = new Directory(`${rootPath}${base}`) + await child.addChild(file) + this._children.set(base, child) + } + } + } + } +} + +/** + * Interface of the internal Directory class + */ +export type IDirectory = Pick diff --git 
a/lib/upload/utils/filesystem.ts b/lib/upload/utils/filesystem.ts new file mode 100644 index 00000000..0602cc49 --- /dev/null +++ b/lib/upload/utils/filesystem.ts @@ -0,0 +1,12 @@ +/*! + * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +// Helpers for the File and Directory API + +// Helper to support browser that do not support the API +export const isFileSystemDirectoryEntry = (o: unknown): o is FileSystemDirectoryEntry => 'FileSystemDirectoryEntry' in window && o instanceof FileSystemDirectoryEntry + +export const isFileSystemFileEntry = (o: unknown): o is FileSystemFileEntry => 'FileSystemFileEntry' in window && o instanceof FileSystemFileEntry + +export const isFileSystemEntry = (o: unknown): o is FileSystemEntry => 'FileSystemEntry' in window && o instanceof FileSystemEntry diff --git a/lib/upload/utils/requests.spec.ts b/lib/upload/utils/requests.spec.ts new file mode 100644 index 00000000..4632d2bd --- /dev/null +++ b/lib/upload/utils/requests.spec.ts @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-or-later + * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + */ + +import { CanceledError } from 'axios' +import { expect, test } from 'vitest' +import { getMtimeHeader, isRequestAborted } from './requests.ts' + +test('getMtimeHeader - valid mtime', () => { + const file = new File([''], 'test.txt', { lastModified: 1620000000000 }) + const headers = getMtimeHeader(file) + expect(headers).toHaveProperty('X-OC-Mtime', 1620000000) +}) + +test('getMtimeHeader - invalid mtime', () => { + const file = new File([''], 'test.txt', { lastModified: -1000 }) + const headers = getMtimeHeader(file) + expect(headers).not.toHaveProperty('X-OC-Mtime') +}) + +test('isRequestAborted - axios cancel error', () => { + const error = new CanceledError('Upload cancelled') + expect(isRequestAborted(error)).toBe(true) +}) + +test('isRequestAborted - DOMException with AbortError 
name', () => { + const error = new DOMException('Upload cancelled', 'AbortError') + expect(isRequestAborted(error)).toBe(true) +}) + +test('isRequestAborted - other error', () => { + const error = new Error('Some other error') + expect(isRequestAborted(error)).toBe(false) +}) diff --git a/lib/upload/utils/requests.ts b/lib/upload/utils/requests.ts new file mode 100644 index 00000000..36fe4e17 --- /dev/null +++ b/lib/upload/utils/requests.ts @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-or-later + * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + */ + +import { isCancel } from '@nextcloud/axios' + +/** + * Create modification time headers if valid value is available. + * It can be invalid on Android devices if SD cards with NTFS / FAT are used, + * as those files might use the NT epoch for time so the value will be negative. + * + * @param file - The file to upload + */ +export function getMtimeHeader(file: File): { 'X-OC-Mtime'?: number } { + const mtime = Math.floor(file.lastModified / 1000) + if (mtime > 0) { + return { 'X-OC-Mtime': mtime } + } + return {} +} + +/** + * Check if the given error is an abort error + * + * @param error - Error to check + */ +export function isRequestAborted(error: unknown): boolean { + return isCancel(error) + || (error instanceof DOMException && error.name === 'AbortError') +} diff --git a/lib/upload/utils/upload.ts b/lib/upload/utils/upload.ts new file mode 100644 index 00000000..83f28d16 --- /dev/null +++ b/lib/upload/utils/upload.ts @@ -0,0 +1,154 @@ +/*! 
+ * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { AxiosError, AxiosProgressEvent, AxiosResponse } from 'axios' + +import { getCurrentUser } from '@nextcloud/auth' +import axios from '@nextcloud/axios' +import { generateRemoteUrl, getBaseUrl } from '@nextcloud/router' +import { getSharingToken } from '@nextcloud/sharing/public' +import axiosRetry, { exponentialDelay, isNetworkOrIdempotentRequestError } from 'axios-retry' +import logger from '../../utils/logger.ts' + +axiosRetry(axios, { retries: 0 }) + +type UploadData = Blob | (() => Promise) + +interface UploadDataOptions { + /** The abort signal */ + signal: AbortSignal + /** Upload progress event callback */ + onUploadProgress?: (event: AxiosProgressEvent) => void + /** Request retry callback (e.g. network error of previous try) */ + onUploadRetry?: () => void + /** The final destination file (for chunked uploads) */ + destinationFile?: string + /** Additional headers */ + headers?: Record + /** Number of retries */ + retries?: number +} + +/** + * Upload some data to a given path + * + * @param url the url to upload to + * @param uploadData the data to upload + * @param uploadOptions upload options + */ +export async function uploadData( + url: string, + uploadData: UploadData, + uploadOptions: UploadDataOptions, +): Promise { + const options = { + headers: {}, + onUploadProgress: () => {}, + onUploadRetry: () => {}, + retries: 5, + ...uploadOptions, + } + + let data: Blob + + // If the upload data is a blob, we can directly use it + // Otherwise, we need to wait for the promise to resolve + if (uploadData instanceof Blob) { + data = uploadData + } else { + data = await uploadData() + } + + // Helps the server to know what to do with the file afterwards (e.g. 
chunked upload) + if (options.destinationFile) { + options.headers.Destination = options.destinationFile + } + + // If no content type is set, we default to octet-stream + if (!options.headers['Content-Type']) { + options.headers['Content-Type'] = 'application/octet-stream' + } + + return await axios.request({ + method: 'PUT', + url, + data, + signal: options.signal, + onUploadProgress: options.onUploadProgress, + headers: options.headers, + 'axios-retry': { + retries: options.retries, + retryDelay: (retryCount: number, error: AxiosError) => exponentialDelay(retryCount, error, 1000), + retryCondition(error: AxiosError): boolean { + // Do not retry on insufficient storage - this is permanent + if (error.status === 507) { + return false + } + // Do a retry on locked error as this is often just some preview generation + if (error.status === 423) { + return true + } + // Otherwise fallback to default behavior + return isNetworkOrIdempotentRequestError(error) + }, + onRetry: options.onUploadRetry, + }, + }) +} + +/** + * Get chunk of the file. + * Doing this on the fly give us a big performance boost and proper garbage collection + * + * @param file File to upload + * @param start Offset to start upload + * @param length Size of chunk to upload + */ +export function getChunk(file: File, start: number, length: number): Promise { + if (start === 0 && file.size <= length) { + return Promise.resolve(new Blob([file], { type: file.type || 'application/octet-stream' })) + } + + return Promise.resolve(new Blob([file.slice(start, start + length)], { type: 'application/octet-stream' })) +} + +/** + * Create a temporary upload workspace to upload the chunks to + * + * @param destinationFile The file name after finishing the chunked upload + * @param retries number of retries + * @param isPublic whether this upload is in a public share or not + * @param customHeaders Custom HTTP headers used when creating the workspace (e.g. 
X-NC-Nickname for file drops) + */ +export async function initChunkWorkspace(destinationFile: string | undefined = undefined, retries: number = 5, isPublic: boolean = false, customHeaders: Record = {}): Promise { + let chunksWorkspace: string + if (isPublic) { + chunksWorkspace = `${getBaseUrl()}/public.php/dav/uploads/${getSharingToken()}` + } else { + chunksWorkspace = generateRemoteUrl(`dav/uploads/${getCurrentUser()?.uid}`) + } + + const hash = [...Array(16)].map(() => Math.floor(Math.random() * 16).toString(16)).join('') + const tempWorkspace = `web-file-upload-${hash}` + const url = `${chunksWorkspace}/${tempWorkspace}` + const headers = customHeaders + if (destinationFile) { + headers.Destination = destinationFile + } + + await axios.request({ + method: 'MKCOL', + url, + headers, + 'axios-retry': { + retries, + retryDelay: (retryCount: number, error: AxiosError) => exponentialDelay(retryCount, error, 1000), + }, + }) + + logger.debug('Created temporary upload workspace', { url }) + + return url +} diff --git a/lib/window.d.ts b/lib/window.d.ts index 519145e3..6dd4ac3e 100644 --- a/lib/window.d.ts +++ b/lib/window.d.ts @@ -6,7 +6,13 @@ declare global { interface Window { - OC: Nextcloud.v32.OC + OC: Nextcloud.v32.OC & { + appConfig: { + files: { + max_chunk_size: number + } + } + } // eslint-disable-next-line @typescript-eslint/no-explicit-any OCA: any _nc_files_scope?: Record> diff --git a/package-lock.json b/package-lock.json index 251bce25..da28ed4c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,13 +10,16 @@ "license": "AGPL-3.0-or-later", "dependencies": { "@nextcloud/auth": "^2.5.3", + "@nextcloud/axios": "^2.5.2", "@nextcloud/capabilities": "^1.2.1", "@nextcloud/l10n": "^3.4.1", "@nextcloud/logger": "^3.0.3", "@nextcloud/paths": "^3.0.0", "@nextcloud/router": "^3.1.0", "@nextcloud/sharing": "^0.4.0", + "axios-retry": "^4.5.0", "is-svg": "^6.1.0", + "p-queue": "^9.1.0", "typescript-event-target": "^1.1.2", "webdav": "^5.9.0" }, @@ -202,6 
+205,7 @@ "integrity": "sha512-lWBYIrF7qK5+GjY5Uy+/hEgp8OJWOD/rpy74GplYRhEauvbHDeFB8t5hPOZxCZ0Oxf4Cc36tK51/l3ymJysrKw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.26.2", @@ -545,6 +549,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=20.19.0" }, @@ -585,6 +590,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=20.19.0" } @@ -1127,7 +1133,6 @@ "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", "dev": true, "license": "Apache-2.0", - "peer": true, "dependencies": { "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", @@ -1143,7 +1148,6 @@ "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", "dev": true, "license": "Apache-2.0", - "peer": true, "dependencies": { "@eslint/core": "^0.17.0" }, @@ -1170,7 +1174,6 @@ "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", @@ -1195,7 +1198,6 @@ "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=18" }, @@ -1209,7 +1211,6 @@ "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, @@ -1239,7 +1240,6 @@ "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } @@ -1316,7 +1316,6 @@ "integrity": 
"sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=18.18.0" } @@ -1327,7 +1326,6 @@ "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", "dev": true, "license": "Apache-2.0", - "peer": true, "dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.4.0" @@ -1342,7 +1340,6 @@ "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=12.22" }, @@ -1367,7 +1364,6 @@ "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=18.18" }, @@ -1606,6 +1602,20 @@ "node": "^20.0.0 || ^22.0.0 || ^24.0.0" } }, + "node_modules/@nextcloud/axios": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/@nextcloud/axios/-/axios-2.5.2.tgz", + "integrity": "sha512-8frJb77jNMbz00TjsSqs1PymY0nIEbNM4mVmwen2tXY7wNgRai6uXilIlXKOYB9jR/F/HKRj6B4vUwVwZbhdbw==", + "license": "GPL-3.0-or-later", + "dependencies": { + "@nextcloud/auth": "^2.5.1", + "@nextcloud/router": "^3.0.1", + "axios": "^1.12.2" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || ^24.0.0" + } + }, "node_modules/@nextcloud/browser-storage": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/@nextcloud/browser-storage/-/browser-storage-0.5.0.tgz", @@ -1908,6 +1918,7 @@ "integrity": "sha512-dKYCMuPO1bmrpuogcjQ8z7ICCH3FP6WmxpwC03yjzGfZhj9fTJg6+bS1+UAplekbN2C+M61UNllGOOoAfGCrdQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@octokit/auth-token": "^4.0.0", "@octokit/graphql": "^7.1.0", @@ -2072,7 +2083,6 @@ "hasInstallScript": true, "license": "MIT", "optional": true, - "peer": true, "dependencies": { "detect-libc": "^1.0.3", 
"is-glob": "^4.0.3", @@ -2115,7 +2125,6 @@ "os": [ "android" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2137,7 +2146,6 @@ "os": [ "darwin" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2159,7 +2167,6 @@ "os": [ "darwin" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2181,7 +2188,6 @@ "os": [ "freebsd" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2203,7 +2209,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2225,7 +2230,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2247,7 +2251,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2269,7 +2272,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2291,7 +2293,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2313,7 +2314,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2335,7 +2335,6 @@ "os": [ "win32" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2357,7 +2356,6 @@ "os": [ "win32" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2379,7 +2377,6 @@ "os": [ "win32" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2812,6 +2809,7 @@ "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.3", "json-schema-traverse": "^1.0.0", @@ -3120,6 +3118,7 @@ "integrity": "sha512-m0jEgYlYz+mDJZ2+F4v8D1AyQb+QzsNqRuI7xg1VQX/KlKS0qT9r1Mo16yo5F/MtifXFgaofIFsdFMox2SxIbQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.16.0" } @@ -3194,6 +3193,7 @@ "integrity": "sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.54.0", "@typescript-eslint/types": 
"8.54.0", @@ -3641,7 +3641,6 @@ "integrity": "sha512-5aBjvGqsWs+MoxswZPoTB9nSDb3dhd1x30xrrltKujlCxo48j8HGDNj3QPhF4VIS0VQDUrA1xUfp2hEa+FNyXA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@babel/parser": "^7.28.0", "@vue/compiler-core": "3.5.18", @@ -3660,7 +3659,6 @@ "integrity": "sha512-xM16Ak7rSWHkM3m22NlmcdIM+K4BMyFARAfV9hYFl+SFuRzrZ3uGMNW05kA5pmeMa0X9X963Kgou7ufdbpOP9g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/compiler-dom": "3.5.18", "@vue/shared": "3.5.18" @@ -3734,7 +3732,6 @@ "integrity": "sha512-x0vPO5Imw+3sChLM5Y+B6G1zPjwdOri9e8V21NnTnlEvkxatHEH5B5KEAJcjuzQ7BsjGrKtfzuQ5eQwXh8HXBg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/shared": "3.5.18" } @@ -3745,7 +3742,6 @@ "integrity": "sha512-DUpHa1HpeOQEt6+3nheUfqVXRog2kivkXHUhoqJiKR33SO4x+a5uNOMkV487WPerQkL0vUuRvq/7JhRgLW3S+w==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/reactivity": "3.5.18", "@vue/shared": "3.5.18" @@ -3757,7 +3753,6 @@ "integrity": "sha512-YwDj71iV05j4RnzZnZtGaXwPoUWeRsqinblgVJwR8XTXYZ9D5PbahHQgsbmzUvCWNF6x7siQ89HgnX5eWkr3mw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/reactivity": "3.5.18", "@vue/runtime-core": "3.5.18", @@ -3771,7 +3766,6 @@ "integrity": "sha512-PvIHLUoWgSbDG7zLHqSqaCoZvHi6NNmfVFOqO+OnwvqMz/tqQr3FuGWS8ufluNddk7ZLBJYMrjcw1c6XzR12mA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/compiler-ssr": "3.5.18", "@vue/shared": "3.5.18" @@ -3793,6 +3787,7 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -3825,7 +3820,6 @@ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ 
-3971,6 +3965,12 @@ "node": ">=12" } }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, "node_modules/available-typed-arrays": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", @@ -3987,6 +3987,30 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/axios": { + "version": "1.13.5", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz", + "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==", + "license": "MIT", + "peer": true, + "dependencies": { + "follow-redirects": "^1.15.11", + "form-data": "^4.0.5", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/axios-retry": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/axios-retry/-/axios-retry-4.5.0.tgz", + "integrity": "sha512-aR99oXhpEDGo0UuAlYcn2iGRds30k366Zfa05XWScR9QaQD4JYiP3/1Qt1u7YlefUOK+cn0CcwoL1oefavQUlQ==", + "license": "Apache-2.0", + "dependencies": { + "is-retry-allowed": "^2.2.0" + }, + "peerDependencies": { + "axios": "0.x || 1.x" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -4055,7 +4079,6 @@ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -4067,7 +4090,6 @@ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "optional": true, - "peer": true, "dependencies": { "fill-range": "^7.1.1" }, @@ -4244,6 +4266,7 @@ "url": "https://github.com/sponsors/ai" } ], + "peer": 
true, "dependencies": { "caniuse-lite": "^1.0.30001688", "electron-to-chromium": "^1.5.73", @@ -4342,7 +4365,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0", @@ -4375,7 +4397,6 @@ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=6" } @@ -4462,7 +4483,6 @@ "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "readdirp": "^4.0.1" }, @@ -4508,6 +4528,18 @@ "dev": true, "license": "MIT" }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, "node_modules/comment-parser": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/comment-parser/-/comment-parser-1.4.5.tgz", @@ -4537,8 +4569,7 @@ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/confbox": { "version": "0.2.2", @@ -4646,7 +4677,6 @@ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "path-key": "^3.1.0", "shebang-command": 
"^2.0.0", @@ -4756,8 +4786,7 @@ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/data-uri-to-buffer": { "version": "4.0.1", @@ -4818,8 +4847,7 @@ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/define-data-property": { "version": "1.1.4", @@ -4855,6 +4883,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/deprecation": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", @@ -4893,7 +4930,6 @@ "dev": true, "license": "Apache-2.0", "optional": true, - "peer": true, "bin": { "detect-libc": "bin/detect-libc.js" }, @@ -4959,7 +4995,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.1", @@ -5015,7 +5050,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -5025,7 +5059,6 @@ 
"version": "1.3.0", "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "dev": true, "engines": { "node": ">= 0.4" } @@ -5041,7 +5074,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0" @@ -5050,6 +5082,21 @@ "node": ">= 0.4" } }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/esbuild": { "version": "0.25.3", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.3.tgz", @@ -5360,7 +5407,6 @@ "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", "dev": true, "license": "BSD-2-Clause", - "peer": true, "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" @@ -5422,7 +5468,6 @@ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "license": "BSD-2-Clause", - "peer": true, "dependencies": { "estraverse": "^5.2.0" }, @@ -5462,11 +5507,16 @@ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "dev": true, "license": "BSD-2-Clause", - "peer": true, "engines": { "node": ">=0.10.0" } }, + "node_modules/eventemitter3": { + "version": "5.0.4", + "resolved": 
"https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz", + "integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==", + "license": "MIT" + }, "node_modules/events": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", @@ -5516,16 +5566,14 @@ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/fast-uri": { "version": "3.0.6", @@ -5590,7 +5638,6 @@ "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "flat-cache": "^4.0.0" }, @@ -5604,7 +5651,6 @@ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, "optional": true, - "peer": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -5634,7 +5680,6 @@ "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" @@ -5648,8 +5693,27 @@ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", "dev": true, - "license": "ISC", - "peer": true + "license": "ISC" + }, + 
"node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } }, "node_modules/for-each": { "version": "0.3.5", @@ -5667,6 +5731,22 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/formdata-polyfill": { "version": "4.0.10", "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", @@ -5711,7 +5791,6 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "dev": true, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -5739,7 +5818,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", @@ -5764,7 +5842,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", "integrity": 
"sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "dev": true, "license": "MIT", "dependencies": { "dunder-proto": "^1.0.1", @@ -5790,7 +5867,6 @@ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "is-glob": "^4.0.3" }, @@ -5812,7 +5888,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -5865,7 +5940,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -5878,7 +5952,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "dev": true, "license": "MIT", "dependencies": { "has-symbols": "^1.0.3" @@ -5919,7 +5992,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "dev": true, "dependencies": { "function-bind": "^1.1.2" }, @@ -6051,7 +6123,6 @@ "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">= 4" } @@ -6061,8 +6132,7 @@ "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.1.4.tgz", "integrity": "sha512-p6u1bG3YSnINT5RQmx/yRZBpenIl30kVxkTLDyHLIMk0gict704Q9n+thfDI7lTRm9vXdDYutVzXhzcThxTnXA==", "dev": 
true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/import-fresh": { "version": "3.3.1", @@ -6070,7 +6140,6 @@ "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -6098,7 +6167,6 @@ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=0.8.19" } @@ -6160,7 +6228,6 @@ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, - "peer": true, "engines": { "node": ">=0.10.0" } @@ -6190,7 +6257,6 @@ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, - "peer": true, "dependencies": { "is-extglob": "^2.1.1" }, @@ -6221,7 +6287,6 @@ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, "optional": true, - "peer": true, "engines": { "node": ">=0.12.0" } @@ -6265,6 +6330,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-retry-allowed": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-2.2.0.tgz", + "integrity": "sha512-XVm7LOeLpTW4jV19QSH38vkswxoLud8sQ57YwJVTPWdiaI9I8keEhGFpBlslyVsgdQy4Opg8QOLb8YRgsyZiQg==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-svg": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-6.1.0.tgz", @@ -6307,8 +6384,7 @@ "resolved": 
"https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true, - "license": "ISC", - "peer": true + "license": "ISC" }, "node_modules/isomorphic-timers-promises": { "version": "1.0.1", @@ -6419,7 +6495,6 @@ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "argparse": "^2.0.1" }, @@ -6514,24 +6589,21 @@ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/json5": { "version": "2.2.3", @@ -6564,7 +6636,6 @@ "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "json-buffer": "3.0.1" } @@ -6649,8 +6720,7 @@ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", "integrity": 
"sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/lru-cache": { "version": "5.1.1", @@ -6740,7 +6810,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -6800,7 +6869,6 @@ "dev": true, "license": "MIT", "optional": true, - "peer": true, "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" @@ -6830,6 +6898,27 @@ "dev": true, "license": "MIT" }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, "node_modules/minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", @@ -6850,7 +6939,6 @@ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -6960,8 +7048,7 @@ "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", "dev": true, "license": "MIT", - "optional": true, - "peer": true + "optional": true }, 
"node_modules/node-domexception": { "version": "1.0.0", @@ -7153,7 +7240,6 @@ "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", @@ -7203,6 +7289,34 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/p-queue": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-9.1.0.tgz", + "integrity": "sha512-O/ZPaXuQV29uSLbxWBGGZO1mCQXV2BLIwUr59JUU9SoH76mnYvtms7aafH/isNSNGwuEfP6W/4xD0/TJXxrizw==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^5.0.1", + "p-timeout": "^7.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-timeout": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-7.0.1.tgz", + "integrity": "sha512-AxTM2wDGORHGEkPCt8yqxOTMgpfbEHqF51f/5fJCmwFC3C/zNcGT63SymH2ttOAaiIws2zVg4+izQCjrakcwHg==", + "license": "MIT", + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/package-name-regex": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/package-name-regex/-/package-name-regex-2.0.6.tgz", @@ -7229,7 +7343,6 @@ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "callsites": "^3.0.0" }, @@ -7318,7 +7431,6 @@ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=8" } @@ -7371,7 +7483,6 @@ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true, "optional": true, - "peer": true, "engines": { "node": ">=8.6" 
}, @@ -7484,6 +7595,12 @@ "dev": true, "license": "MIT" }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, "node_modules/public-encrypt": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", @@ -7613,7 +7730,6 @@ "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">= 14.18.0" }, @@ -7673,7 +7789,6 @@ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=4" } @@ -7761,6 +7876,7 @@ "integrity": "sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@types/estree": "1.0.8" }, @@ -8032,7 +8148,6 @@ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "shebang-regex": "^3.0.0" }, @@ -8046,7 +8161,6 @@ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=8" } @@ -8468,6 +8582,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8526,7 +8641,6 @@ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, "optional": true, - "peer": true, "dependencies": { "is-number": 
"^7.0.0" }, @@ -8706,6 +8820,7 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -8911,6 +9026,7 @@ "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", @@ -9524,6 +9640,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -9537,6 +9654,7 @@ "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/expect": "4.0.18", "@vitest/mocker": "4.0.18", @@ -9665,7 +9783,6 @@ "integrity": "sha512-CydUvFOQKD928UzZhTp4pr2vWz1L+H99t7Pkln2QSPdvmURT0MoC4wUccfCnuEaihNsu9aYYyk+bep8rlfkUXw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "debug": "^4.4.0", "eslint-scope": "^8.2.0", @@ -9690,7 +9807,6 @@ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", "dev": true, "license": "ISC", - "peer": true, "bin": { "semver": "bin/semver.js" }, @@ -9835,7 +9951,6 @@ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "isexe": "^2.0.0" }, @@ -9891,7 +10006,6 @@ "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=0.10.0" } diff --git a/package.json b/package.json index 609b37fd..3a44bb1b 100644 --- a/package.json +++ b/package.json @@ -50,13 +50,16 @@ }, 
"dependencies": { "@nextcloud/auth": "^2.5.3", + "@nextcloud/axios": "^2.5.2", "@nextcloud/capabilities": "^1.2.1", "@nextcloud/l10n": "^3.4.1", "@nextcloud/logger": "^3.0.3", "@nextcloud/paths": "^3.0.0", "@nextcloud/router": "^3.1.0", "@nextcloud/sharing": "^0.4.0", + "axios-retry": "^4.5.0", "is-svg": "^6.1.0", + "p-queue": "^9.1.0", "typescript-event-target": "^1.1.2", "webdav": "^5.9.0" },