From 7e5553c0fb8ac8b633534da2494a1c209139e55b Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 00:28:44 +0100 Subject: [PATCH 01/14] feat(upload): merge upload classes from `@nextcloud/upload` Signed-off-by: Ferdinand Thiessen --- .../errors/UploadCancelledError.spec.ts | 17 + lib/upload/errors/UploadCancelledError.ts | 13 + lib/upload/getUploader.spec.ts | 31 + lib/upload/getUploader.ts | 25 + lib/upload/index.ts | 12 + lib/upload/upload.ts | 124 +++ lib/upload/uploader/eta.spec.ts | 316 ++++++++ lib/upload/uploader/eta.ts | 225 ++++++ lib/upload/uploader/index.ts | 15 + lib/upload/uploader/uploader.ts | 708 ++++++++++++++++++ lib/upload/utils/config.ts | 25 + lib/upload/utils/conflicts.ts | 32 + lib/upload/utils/fileTree.ts | 127 ++++ lib/upload/utils/filesystem.ts | 12 + lib/upload/utils/l10n.ts | 36 + lib/upload/utils/logger.ts | 11 + lib/upload/utils/upload.ts | 150 ++++ lib/upload/window.d.ts | 21 + package-lock.json | 326 +++++--- package.json | 3 + 20 files changed, 2123 insertions(+), 106 deletions(-) create mode 100644 lib/upload/errors/UploadCancelledError.spec.ts create mode 100644 lib/upload/errors/UploadCancelledError.ts create mode 100644 lib/upload/getUploader.spec.ts create mode 100644 lib/upload/getUploader.ts create mode 100644 lib/upload/index.ts create mode 100644 lib/upload/upload.ts create mode 100644 lib/upload/uploader/eta.spec.ts create mode 100644 lib/upload/uploader/eta.ts create mode 100644 lib/upload/uploader/index.ts create mode 100644 lib/upload/uploader/uploader.ts create mode 100644 lib/upload/utils/config.ts create mode 100644 lib/upload/utils/conflicts.ts create mode 100644 lib/upload/utils/fileTree.ts create mode 100644 lib/upload/utils/filesystem.ts create mode 100644 lib/upload/utils/l10n.ts create mode 100644 lib/upload/utils/logger.ts create mode 100644 lib/upload/utils/upload.ts create mode 100644 lib/upload/window.d.ts diff --git a/lib/upload/errors/UploadCancelledError.spec.ts 
b/lib/upload/errors/UploadCancelledError.spec.ts new file mode 100644 index 00000000..e270eff7 --- /dev/null +++ b/lib/upload/errors/UploadCancelledError.spec.ts @@ -0,0 +1,17 @@ +/*! + * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { expect, test } from 'vitest' +import { UploadCancelledError } from './UploadCancelledError.ts' + +test('UploadCancelledError', () => { + const cause = new Error('Network error') + const error = new UploadCancelledError(cause) + expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadCancelledError) + expect(error.message).toBe('Upload has been cancelled') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_CANCELLED__') +}) diff --git a/lib/upload/errors/UploadCancelledError.ts b/lib/upload/errors/UploadCancelledError.ts new file mode 100644 index 00000000..8b0ad994 --- /dev/null +++ b/lib/upload/errors/UploadCancelledError.ts @@ -0,0 +1,13 @@ +/*! + * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +import { t } from '../utils/l10n.ts' + +export class UploadCancelledError extends Error { + + public constructor(cause?: unknown) { + super(t('Upload has been cancelled'), { cause }) + } + +} diff --git a/lib/upload/getUploader.spec.ts b/lib/upload/getUploader.spec.ts new file mode 100644 index 00000000..d5855974 --- /dev/null +++ b/lib/upload/getUploader.spec.ts @@ -0,0 +1,31 @@ +/*! 
+ * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { join } from '@nextcloud/paths' +import { expect, test } from 'vitest' +import { defaultRemoteURL, defaultRootPath } from '../dav/dav.ts' +import { scopedGlobals } from '../globalScope.ts' +import { Folder } from '../node/folder.ts' +import { getUploader } from './getUploader.ts' +import { Uploader } from './uploader/Uploader.ts' + +test('getUploader - should return the uploader instance from the global scope', async () => { + const uploader = new Uploader(false, new Folder({ owner: 'test', root: defaultRootPath, source: join(defaultRemoteURL, defaultRootPath) })) + scopedGlobals.uploader = uploader + const returnedUploader = getUploader() + expect(returnedUploader).toBe(uploader) +}) + +test('getUploader - should return the same instance on multiple calls', async () => { + const uploader1 = getUploader() + const uploader2 = getUploader() + expect(uploader1).toBe(uploader2) +}) + +test('getUploader - should not return the same instance on multiple calls with forceRecreate', async () => { + const uploader1 = getUploader(true) + const uploader2 = getUploader(true, true) + expect(uploader1).not.toBe(uploader2) +}) diff --git a/lib/upload/getUploader.ts b/lib/upload/getUploader.ts new file mode 100644 index 00000000..0d810cda --- /dev/null +++ b/lib/upload/getUploader.ts @@ -0,0 +1,25 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { isPublicShare } from '@nextcloud/sharing/public' + +import { Uploader } from './uploader/uploader.ts' + +/** + * Get the global Uploader instance. + * + * Note: If you need a local uploader you can just create a new instance, + * this global instance will be shared with other apps. 
+ * + * @param isPublic Set to true to use public upload endpoint (by default it is auto detected) + * @param forceRecreate Force a new uploader instance - main purpose is for testing + */ +export function getUploader(isPublic: boolean = isPublicShare(), forceRecreate = false): Uploader { + if (forceRecreate || window._nc_uploader === undefined) { + window._nc_uploader = new Uploader(isPublic) + } + + return window._nc_uploader +} diff --git a/lib/upload/index.ts b/lib/upload/index.ts new file mode 100644 index 00000000..c429cfbb --- /dev/null +++ b/lib/upload/index.ts @@ -0,0 +1,12 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +export type { Eta, EtaEventsMap } from './uploader/index.ts' +export type { IDirectory, Directory } from './utils/fileTree.ts' + +export { getUploader, upload } from './getUploader.ts' +export { Upload, Status as UploadStatus } from './upload.ts' +export { Uploader, UploaderStatus, EtaStatus } from './uploader/index.ts' +export { getConflicts, hasConflict } from './utils/conflicts.ts' diff --git a/lib/upload/upload.ts b/lib/upload/upload.ts new file mode 100644 index 00000000..59dfea2a --- /dev/null +++ b/lib/upload/upload.ts @@ -0,0 +1,124 @@ +/** + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +import type { AxiosResponse } from 'axios' +import { getMaxChunksSize } from './utils/config.js' + +export enum Status { + INITIALIZED = 0, + UPLOADING = 1, + ASSEMBLING = 2, + FINISHED = 3, + CANCELLED = 4, + FAILED = 5, +} +export class Upload { + + private _source: string + private _file: File + private _isChunked: boolean + private _chunks: number + + private _size: number + private _uploaded = 0 + private _startTime = 0 + + private _status: Status = Status.INITIALIZED + private _controller: AbortController + private _response: AxiosResponse|null = null + + constructor(source: 
string, chunked = false, size: number, file: File) { + const chunks = Math.min(getMaxChunksSize() > 0 ? Math.ceil(size / getMaxChunksSize()) : 1, 10000) + this._source = source + this._isChunked = chunked && getMaxChunksSize() > 0 && chunks > 1 + this._chunks = this._isChunked ? chunks : 1 + this._size = size + this._file = file + this._controller = new AbortController() + } + + get source(): string { + return this._source + } + + get file(): File { + return this._file + } + + get isChunked(): boolean { + return this._isChunked + } + + get chunks(): number { + return this._chunks + } + + get size(): number { + return this._size + } + + get startTime(): number { + return this._startTime + } + + set response(response: AxiosResponse|null) { + this._response = response + } + + get response(): AxiosResponse|null { + return this._response + } + + get uploaded(): number { + return this._uploaded + } + + /** + * Update the uploaded bytes of this upload + */ + set uploaded(length: number) { + if (length >= this._size) { + this._status = this._isChunked + ? Status.ASSEMBLING + : Status.FINISHED + this._uploaded = this._size + return + } + + this._status = Status.UPLOADING + this._uploaded = length + + // If first progress, let's log the start time + if (this._startTime === 0) { + this._startTime = new Date().getTime() + } + } + + get status(): number { + return this._status + } + + /** + * Update this upload status + */ + set status(status: Status) { + this._status = status + } + + /** + * Returns the axios cancel token source + */ + get signal(): AbortSignal { + return this._controller.signal + } + + /** + * Cancel any ongoing requests linked to this upload + */ + cancel() { + this._controller.abort() + this._status = Status.CANCELLED + } + +} diff --git a/lib/upload/uploader/eta.spec.ts b/lib/upload/uploader/eta.spec.ts new file mode 100644 index 00000000..39244bd3 --- /dev/null +++ b/lib/upload/uploader/eta.spec.ts @@ -0,0 +1,316 @@ +/*! 
+ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +import { afterAll, beforeAll, describe, expect, it, test, vi } from 'vitest' +import { Eta, EtaStatus } from './eta.ts' + +describe('ETA - status', () => { + it('has default set', () => { + const eta = new Eta() + expect(eta.progress).toBe(0) + expect(eta.time).toBe(Infinity) + expect(eta.timeReadable).toBe('estimating time left') + expect(eta.speed).toBe(-1) + expect(eta.status).toBe(EtaStatus.Idle) + }) + + it('can autostart in constructor', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + expect(eta.progress).toBe(0) + expect(eta.time).toBe(Infinity) + expect(eta.timeReadable).toBe('estimating time left') + expect(eta.speed).toBe(-1) + }) + + it('can reset', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + + eta.add(10) + expect(eta.progress).toBe(10) + + eta.reset() + expect(eta.status).toBe(EtaStatus.Idle) + expect(eta.progress).toBe(0) + }) + + it('does not update when idle', () => { + const eta = new Eta() + expect(eta.progress).toBe(0) + + eta.update(10, 100) + expect(eta.progress).toBe(0) + + eta.add(10) + expect(eta.progress).toBe(0) + expect(eta.status).toBe(EtaStatus.Idle) + }) + + it('does not update when paused', () => { + const eta = new Eta({ start: true, total: 100 }) + eta.add(10) + expect(eta.progress).toBe(10) + + eta.pause() + eta.add(10) + expect(eta.progress).toBe(10) + expect(eta.status).toBe(EtaStatus.Paused) + }) + + it('can resume', () => { + const eta = new Eta() + expect(eta.status).toBe(EtaStatus.Idle) + eta.resume() + expect(eta.status).toBe(EtaStatus.Running) + }) +}) + +describe('ETA - progress', () => { + beforeAll(() => vi.useFakeTimers()) + afterAll(() => vi.useRealTimers()) + + test('progress calculation', () => { + const eta = new Eta({ start: true, total: 100 * 1024 * 1024, cutoffTime: 2.5 
}) + expect(eta.progress).toBe(0) + + // First upload some parts with about 5MiB/s which should take 3s (total 20s) + for (let i = 1; i <= 6; i++) { + vi.advanceTimersByTime(500) + eta.add(2.5 * 1024 * 1024) + expect(eta.progress).toBe(i * 2.5) + expect(eta.speed).toBe(-1) + expect(eta.speedReadable).toBe('') + expect(eta.time).toBe(Infinity) + } + + // this is reached after (virtual) 3s with 6 * 2.5MiB (=15MiB) data of 100MiB total + expect(eta.timeReadable).toBe('estimating time left') + + // Adding another 500ms with 5MiB/s will result in enough information for estimating + vi.advanceTimersByTime(500) + eta.add(2.5 * 1024 * 1024) + expect(eta.progress).toBe(17.5) + expect(eta.speed).toMatchInlineSnapshot('4826778') + expect(eta.speedReadable).toMatchInlineSnapshot('"4.6 MB∕s"') + expect(eta.time).toMatchInlineSnapshot('18') + expect(eta.timeReadable).toMatchInlineSnapshot('"18 seconds left"') + + // Skip forward another 4.5seconds + for (let i = 0; i < 9; i++) { + vi.advanceTimersByTime(500) + eta.add(2.5 * 1024 * 1024) + } + // See we made some progress + expect(eta.progress).toBe(40) + // See as we have constant speed, the speed is closing to 5MiB/s (5242880) + expect(eta.speed).toMatchInlineSnapshot('5060836') + expect(eta.speedReadable).toMatchInlineSnapshot('"4.8 MB∕s"') + expect(eta.time).toMatchInlineSnapshot('12') + expect(eta.timeReadable).toMatchInlineSnapshot('"12 seconds left"') + + // Having a spike of 10MiB/s will not result in halfing the eta + vi.advanceTimersByTime(500) + eta.add(5 * 1024 * 1024) + expect(eta.progress).toBe(45) + // See the value is not doubled + expect(eta.speed).toMatchInlineSnapshot('5208613') + expect(eta.speedReadable).toMatchInlineSnapshot('"5 MB∕s"') + // And the time has not halved + expect(eta.time).toMatchInlineSnapshot('11') + expect(eta.timeReadable).toMatchInlineSnapshot('"11 seconds left"') + + // Add another 3 seconds so we should see 'few seconds left' + for (let i = 0; i < 6; i++) { + vi.advanceTimersByTime(500) 
+ eta.add(2.5 * 1024 * 1024) + } + expect(eta.progress).toBe(60) + expect(eta.speed).toMatchInlineSnapshot('5344192') + expect(eta.time).toMatchInlineSnapshot('8') + expect(eta.timeReadable).toMatchInlineSnapshot('"a few seconds left"') + }) + + test('long running progress', () => { + const eta = new Eta({ start: true, total: 100 * 1024 * 1024, cutoffTime: 2.5 }) + expect(eta.progress).toBe(0) + + // First upload some parts with about 1MiB/s + for (let i = 1; i <= 6; i++) { + vi.advanceTimersByTime(500) + eta.add(512 * 1024) + expect(eta.progress).toBe(i / 2) + expect(eta.speed).toBe(-1) + expect(eta.time).toBe(Infinity) + } + + // Now we should be able to see some progress + vi.advanceTimersByTime(500) + eta.add(512 * 1024) + expect(eta.progress).toBe(3.5) + expect(eta.time).toBe(105) + // time is over 1 minute so we see the formatted output + expect(eta.timeReadable).toMatchInlineSnapshot('"00:01:45 left"') + + // Add another minute and we should see only seconds: + for (let i = 0; i < 120; i++) { + vi.advanceTimersByTime(500) + eta.add(512 * 1024) + expect(eta.progress).toBe(4 + 0.5 * i) + } + + // Now we have uploaded 63.5 MiB - so 36.5 MiB missing by having 1MiB/s upload speed we expect 37 seconds left: + expect(eta.progress).toBe(63.5) + expect(eta.time).toBe(37) + expect(eta.timeReadable).toMatchInlineSnapshot('"37 seconds left"') + }) + + test('progress calculation for fast uploads', () => { + const eta = new Eta({ start: true, total: 100 * 1024 * 1024, cutoffTime: 2.5 }) + expect(eta.progress).toBe(0) + + // we have 100 MiB - when uploading with 40 MiB/s the time will be just like 2.5 seconds + // so not enough for estimation, instead we use the current speed to at least show that it is very fast + + // First chunk will not show any information as we initialize the system + vi.advanceTimersByTime(500) + eta.add(20 * 1024 * 1024) + expect(eta.progress).toBe(20) + expect(eta.speed).toBe(-1) + expect(eta.time).toBe(Infinity) + 
expect(eta.timeReadable).toBe('estimating time left') + + // Now we have some information but not enough for normal estimation + // yet we show some information as the upload is very fast (40% per second) + vi.advanceTimersByTime(500) + eta.add(20 * 1024 * 1024) + expect(eta.progress).toBe(40) + expect(eta.time).toBe(1.5) + expect(eta.timeReadable).toBe('a few seconds left') + // still no speed information + expect(eta.speed).toBe(-1) + + // same check for the last 60MiB + for (let i = 1; i <= 3; i++) { + vi.advanceTimersByTime(500) + eta.add(20 * 1024 * 1024) + expect(eta.progress).toBe(40 + i * 20) + expect(eta.time).toBe(1.5 - (i / 2)) + expect(eta.timeReadable).toBe('a few seconds left') + // still no speed information + expect(eta.speed).toBe(-1) + } + expect(eta.progress).toBe(100) + }) + + it('can autostart in constructor', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + expect(eta.progress).toBe(0) + expect(eta.time).toBe(Infinity) + expect(eta.timeReadable).toBe('estimating time left') + expect(eta.speed).toBe(-1) + }) + + it('can reset', () => { + const eta = new Eta({ start: true, total: 100 }) + expect(eta.status).toBe(EtaStatus.Running) + + eta.add(10) + expect(eta.progress).toBe(10) + + eta.reset() + expect(eta.status).toBe(EtaStatus.Idle) + expect(eta.progress).toBe(0) + }) + + it('does not update when idle', () => { + const eta = new Eta() + expect(eta.progress).toBe(0) + + eta.update(10, 100) + expect(eta.progress).toBe(0) + + eta.add(10) + expect(eta.progress).toBe(0) + expect(eta.status).toBe(EtaStatus.Idle) + }) + + it('does not update when paused', () => { + const eta = new Eta({ start: true, total: 100 }) + eta.add(10) + expect(eta.progress).toBe(10) + + eta.pause() + eta.add(10) + expect(eta.progress).toBe(10) + expect(eta.status).toBe(EtaStatus.Paused) + }) + + it('can resume', () => { + const eta = new Eta() + expect(eta.status).toBe(EtaStatus.Idle) + eta.resume() + 
expect(eta.status).toBe(EtaStatus.Running) + }) +}) + +describe('ETA - events', () => { + it('emits updated event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('update', spy) + + // only works when running so nothing should happen + eta.update(10, 100) + expect(spy).not.toBeCalled() + + // now start and update + eta.resume() + eta.update(10, 100) + expect(spy).toBeCalledTimes(1) + }) + + it('emits reset event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('reset', spy) + + eta.reset() + expect(spy).toBeCalledTimes(1) + }) + + it('emits pause event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('pause', spy) + + // cannot pause if not running + eta.pause() + expect(spy).toBeCalledTimes(0) + + // start + eta.resume() + expect(spy).toBeCalledTimes(0) + + // Pause - this time the event should be emitted + eta.pause() + expect(spy).toBeCalledTimes(1) + // double pause does nothing + eta.pause() + expect(spy).toBeCalledTimes(1) + }) + + it('emits resume event', () => { + const spy = vi.fn() + const eta = new Eta() + eta.addEventListener('resume', spy) + + eta.resume() + expect(spy).toBeCalledTimes(1) + // already resumed so nothing happens + eta.resume() + expect(spy).toBeCalledTimes(1) + }) +}) diff --git a/lib/upload/uploader/eta.ts b/lib/upload/uploader/eta.ts new file mode 100644 index 00000000..b12dc57a --- /dev/null +++ b/lib/upload/uploader/eta.ts @@ -0,0 +1,225 @@ +/*! 
+ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { TypedEventTarget } from 'typescript-event-target' +import { n, t } from '../utils/l10n.ts' +import { formatFileSize } from '@nextcloud/files' + +export enum EtaStatus { + Idle = 0, + Paused = 1, + Running = 2, +} + +interface EtaOptions { + /** Low pass filter cutoff time for smoothing the speed */ + cutoffTime?: number + /** Total number of bytes to be expected */ + total?: number + /** Start the estimation directly */ + start?: boolean +} + +export interface EtaEventsMap { + pause: CustomEvent + reset: CustomEvent + resume: CustomEvent + update: CustomEvent +} + +export class Eta extends TypedEventTarget { + + /** Bytes done */ + private _done: number = 0 + /** Total bytes to do */ + private _total: number = 0 + /** Current progress (cached) as interval [0,1] */ + private _progress: number = 0 + /** Status of the ETA */ + private _status: EtaStatus = EtaStatus.Idle + /** Time of the last update */ + private _startTime: number = -1 + /** Total elapsed time for current ETA */ + private _elapsedTime: number = 0 + /** Current speed in bytes per second */ + private _speed: number = -1 + /** Expected duration to finish in seconds */ + private _eta: number = Infinity + + /** + * Cutoff time for the low pass filter of the ETA. + * A higher value will consider more history information for calculation, + * and thus suppress spikes of the speed, + * but will make the overall resposiveness slower. + */ + private _cutoffTime = 2.5 + + public constructor(options: EtaOptions = {}) { + super() + if (options.start) { + this.resume() + } + if (options.total) { + this.update(0, options.total) + } + this._cutoffTime = options.cutoffTime ?? 2.5 + } + + /** + * Add more transferred bytes. + * @param done Additional bytes done. + */ + public add(done: number): void { + this.update(this._done + done) + } + + /** + * Update the transmission state. 
+ * + * @param done The new value of transferred bytes. + * @param total Optionally also update the total bytes we expect. + */ + public update(done: number, total?: number): void { + if (this.status !== EtaStatus.Running) { + return + } + if (total && total > 0) { + this._total = total + } + + const deltaDone = done - this._done + const deltaTime = (Date.now() - this._startTime) / 1000 + + this._startTime = Date.now() + this._elapsedTime += deltaTime + this._done = done + this._progress = this._done / this._total + + // Only update speed when the history is large enough so we can estimate it + const historyNeeded = this._cutoffTime + deltaTime + if (this._elapsedTime > historyNeeded) { + // Filter the done bytes using a low pass filter to suppress speed spikes + const alpha = deltaTime / (deltaTime + (1 / this._cutoffTime)) + const filtered = (this._done - deltaDone) + (1 - alpha) * deltaDone + // bytes per second - filtered + this._speed = Math.round(filtered / this._elapsedTime) + } else if (this._speed === -1 && this._elapsedTime > deltaTime) { + // special case when uploading with high speed + // it could be that the upload is finished before we reach the curoff time + // so we already give an estimation + const remaining = this._total - done + const eta = remaining / (done / this._elapsedTime) + // Only set the ETA when we either already set it for a previous update + // or when the special case happened that we are in fast upload and we only got a couple of seconds for the whole upload + // meaning we are below 2x the cutoff time. + if (this._eta !== Infinity || eta <= 2 * this._cutoffTime) { + // We only take a couple of seconds so we set the eta to the current ETA using current speed. + // But we do not set the speed because we do not want to trigger the real ETA calculation below + // and especially because the speed would be very spiky (we still have no filters in place). 
+ this._eta = eta + } + } + + // Update the eta if we have valid speed information (prevent divide by zero) + if (this._speed > 0) { + // Estimate transfer of remaining bytes with current average speed + this._eta = Math.round((this._total - this._done) / this._speed) + } + + this.dispatchTypedEvent('update', new CustomEvent('update', { cancelable: false })) + } + + public reset(): void { + this._done = 0 + this._total = 0 + this._progress = 0 + this._elapsedTime = 0 + this._eta = Infinity + this._speed = -1 + this._startTime = -1 + this._status = EtaStatus.Idle + this.dispatchTypedEvent('reset', new CustomEvent('reset')) + } + + /** + * Pause the ETA calculation. + */ + public pause(): void { + if (this._status === EtaStatus.Running) { + this._status = EtaStatus.Paused + this._elapsedTime += (Date.now() - this._startTime) / 1000 + this.dispatchTypedEvent('pause', new CustomEvent('pause')) + } + } + + /** + * Resume the ETA calculation. + */ + public resume(): void { + if (this._status !== EtaStatus.Running) { + this._startTime = Date.now() + this._status = EtaStatus.Running + this.dispatchTypedEvent('resume', new CustomEvent('resume')) + } + } + + /** + * Status of the Eta (paused, active, idle). + */ + public get status(): EtaStatus { + return this._status + } + + /** + * Progress (percent done) + */ + public get progress(): number { + return Math.round(this._progress * 10000) / 100 + } + + /** + * Estimated time in seconds. + */ + public get time(): number { + return this._eta + } + + /** + * Human readable version of the estimated time. 
+ */ + public get timeReadable(): string { + if (this._eta === Infinity) { + return t('estimating time left') + } else if (this._eta < 10) { + return t('a few seconds left') + } else if (this._eta < 60) { + return n('{seconds} seconds left', '{seconds} seconds left', this._eta, { seconds: this._eta }) + } + + const hours = String(Math.floor(this._eta / 3600)).padStart(2, '0') + const minutes = String(Math.floor((this._eta % 3600) / 60)).padStart(2, '0') + const seconds = String(this._eta % 60).padStart(2, '0') + return t('{time} left', { time: `${hours}:${minutes}:${seconds}` }) // TRANSLATORS time has the format 00:00:00 + } + + /** + * Transfer speed in bytes per second. + * Returns `-1` if not yet estimated. + */ + public get speed(): number { + return this._speed + } + + /** + * Get the speed in human readable format using file sizes like 10KB/s. + * Returns the empty string if not yet estimated. + */ + public get speedReadable(): string { + return this._speed > 0 + ? `${formatFileSize(this._speed, true)}∕s` + : '' + } + +} diff --git a/lib/upload/uploader/index.ts b/lib/upload/uploader/index.ts new file mode 100644 index 00000000..1111408d --- /dev/null +++ b/lib/upload/uploader/index.ts @@ -0,0 +1,15 @@ +/*! 
+ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +export { + type Eta, + type EtaEventsMap, + EtaStatus, +} from './eta.ts' + +export { + Uploader, + UploaderStatus, +} from './uploader.ts' diff --git a/lib/upload/uploader/uploader.ts b/lib/upload/uploader/uploader.ts new file mode 100644 index 00000000..0f96c43e --- /dev/null +++ b/lib/upload/uploader/uploader.ts @@ -0,0 +1,708 @@ +/** + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +import type { AxiosError, AxiosResponse } from 'axios' +import type { WebDAVClient } from 'webdav' +import type { IDirectory } from '../utils/fileTree.ts' + +import { getCurrentUser } from '@nextcloud/auth' +import { FileType, Folder, Permission, davGetClient, davRemoteURL, davRootPath } from '@nextcloud/files' +import { encodePath } from '@nextcloud/paths' +import { normalize } from 'path' +import { getCapabilities } from '@nextcloud/capabilities' + +import axios, { isCancel } from '@nextcloud/axios' +import PCancelable from 'p-cancelable' +import PQueue from 'p-queue' + +import { UploadCancelledError } from '../errors/UploadCancelledError.ts' +import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' +import { getMaxChunksSize } from '../utils/config.ts' +import { Status as UploadStatus, Upload } from '../upload.ts' +import { isFileSystemFileEntry } from '../utils/filesystem.ts' +import { Directory } from '../utils/fileTree.ts' +import { t } from '../utils/l10n.ts' +import logger from '../utils/logger.ts' +import { Eta } from './eta.ts' + +export enum UploaderStatus { + IDLE = 0, + UPLOADING = 1, + PAUSED = 2 +} + +export class Uploader { + + // Initialized via setter in the constructor + private _destinationFolder!: Folder + private _isPublic: boolean + private _customHeaders: Record + + // Global upload queue + private _uploadQueue: Array = [] + private 
_jobQueue: PQueue = new PQueue({ + // Maximum number of concurrent uploads + // @ts-expect-error TS2339 Object has no defined properties + concurrency: getCapabilities().files?.chunked_upload?.max_parallel_count ?? 5, + }) + + private _queueSize = 0 + private _queueProgress = 0 + private _queueStatus: UploaderStatus = UploaderStatus.IDLE + + private _eta = new Eta() + + private _notifiers: Array<(upload: Upload) => void> = [] + + /** + * Initialize uploader + * + * @param {boolean} isPublic are we in public mode ? + * @param {Folder} destinationFolder the context folder to operate, relative to the root folder + */ + constructor( + isPublic = false, + destinationFolder?: Folder, + ) { + this._isPublic = isPublic + this._customHeaders = {} + + if (!destinationFolder) { + const source = `${davRemoteURL}${davRootPath}` + let owner: string + + if (isPublic) { + owner = 'anonymous' + } else { + const user = getCurrentUser()?.uid + if (!user) { + throw new Error('User is not logged in') + } + owner = user + } + + destinationFolder = new Folder({ + id: 0, + owner, + permissions: Permission.ALL, + root: davRootPath, + source, + }) + } + this.destination = destinationFolder + + logger.debug('Upload workspace initialized', { + destination: this.destination, + root: this.root, + isPublic, + maxChunksSize: getMaxChunksSize(), + }) + } + + /** + * Get the upload destination path relative to the root folder + */ + get destination(): Folder { + return this._destinationFolder + } + + /** + * Set the upload destination path relative to the root folder + */ + set destination(folder: Folder) { + if (!folder || folder.type !== FileType.Folder || !folder.source) { + throw new Error('Invalid destination folder') + } + + logger.debug('Destination set', { folder }) + this._destinationFolder = folder + } + + /** + * Get the root folder + */ + get root() { + return this._destinationFolder.source + } + + /** + * Get registered custom headers for uploads + */ + get customHeaders(): Record { + 
return structuredClone(this._customHeaders) + } + + /** + * Set a custom header + * @param name The header to set + * @param value The string value + */ + setCustomHeader(name: string, value: string = ''): void { + this._customHeaders[name] = value + } + + /** + * Unset a custom header + * @param name The header to unset + */ + deleteCustomerHeader(name: string): void { + delete this._customHeaders[name] + } + + /** + * Get the upload queue + */ + get queue(): Upload[] { + return this._uploadQueue + } + + private reset() { + // Reset the ETA + this._eta.reset() + // If there is no upload in the queue and no job in the queue + if (this._uploadQueue.length === 0 && this._jobQueue.size === 0) { + return + } + + // Reset upload queue but keep the reference + this._uploadQueue.splice(0, this._uploadQueue.length) + this._jobQueue.clear() + this._queueSize = 0 + this._queueProgress = 0 + this._queueStatus = UploaderStatus.IDLE + logger.debug('Uploader state reset') + } + + /** + * Pause any ongoing upload(s) + */ + public pause() { + this._eta.pause() + this._jobQueue.pause() + this._queueStatus = UploaderStatus.PAUSED + this.updateStats() + logger.debug('Uploader paused') + } + + /** + * Resume any pending upload(s) + */ + public start() { + this._eta.resume() + this._jobQueue.start() + this._queueStatus = UploaderStatus.UPLOADING + this.updateStats() + logger.debug('Uploader resumed') + } + + /** + * Get the estimation for the uploading time. 
+ */ + get eta(): Eta { + return this._eta + } + + /** + * Get the upload queue stats + */ + get info() { + return { + size: this._queueSize, + progress: this._queueProgress, + status: this._queueStatus, + } + } + + private updateStats() { + const size = this._uploadQueue.map(upload => upload.size) + .reduce((partialSum, a) => partialSum + a, 0) + const uploaded = this._uploadQueue.map(upload => upload.uploaded) + .reduce((partialSum, a) => partialSum + a, 0) + + this._eta.update(uploaded, size) + this._queueSize = size + this._queueProgress = uploaded + + // If already paused keep it that way + if (this._queueStatus !== UploaderStatus.PAUSED) { + const pending = this._uploadQueue.find(({ status }) => [UploadStatus.INITIALIZED, UploadStatus.UPLOADING, UploadStatus.ASSEMBLING].includes(status)) + if (this._jobQueue.size > 0 || pending) { + this._queueStatus = UploaderStatus.UPLOADING + } else { + this.eta.reset() + this._queueStatus = UploaderStatus.IDLE + } + } + } + + addNotifier(notifier: (upload: Upload) => void) { + this._notifiers.push(notifier) + } + + /** + * Notify listeners of the upload completion + * @param upload The upload that finished + */ + private _notifyAll(upload: Upload): void { + for (const notifier of this._notifiers) { + try { + notifier(upload) + } catch (error) { + logger.warn('Error in upload notifier', { error, source: upload.source }) + } + } + } + + /** + * Uploads multiple files or folders while preserving the relative path (if available) + * @param {string} destination The destination path relative to the root folder. e.g. 
/foo/bar (a file "a.txt" will be uploaded then to "/foo/bar/a.txt") + * @param {Array} files The files and/or folders to upload + * @param {Function} callback Callback that receives the nodes in the current folder and the current path to allow resolving conflicts, all nodes that are returned will be uploaded (if a folder does not exist it will be created) + * @return Cancelable promise that resolves to an array of uploads + * + * @example + * ```ts + * // For example this is from handling the onchange event of an input[type=file] + * async handleFiles(files: File[]) { + * this.uploads = await this.uploader.batchUpload('uploads', files, this.handleConflicts) + * } + * + * async handleConflicts(nodes: File[], currentPath: string) { + * const conflicts = getConflicts(nodes, this.fetchContent(currentPath)) + * if (conflicts.length === 0) { + * // No conflicts so upload all + * return nodes + * } else { + * // Open the conflict picker to resolve conflicts + * try { + * const { selected, renamed } = await openConflictPicker(currentPath, conflicts, this.fetchContent(currentPath), { recursive: true }) + * return [...selected, ...renamed] + * } catch (e) { + * return false + * } + * } + * } + * ``` + */ + batchUpload( + destination: string, + files: (File|FileSystemEntry)[], + callback?: (nodes: Array, currentPath: string) => Promise|false>, + ): PCancelable { + if (!callback) { + callback = async (files: Array) => files + } + + return new PCancelable(async (resolve, reject, onCancel) => { + const rootFolder = new Directory('') + await rootFolder.addChildren(files) + // create a meta upload to ensure all ongoing child requests are listed + const target = `${this.root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` + const upload = new Upload(target, false, 0, rootFolder) + upload.status = UploadStatus.UPLOADING + this._uploadQueue.push(upload) + + logger.debug('Starting new batch upload', { target }) + try { + // setup client with root and custom header + const 
client = davGetClient(this.root, this._customHeaders) + // Create the promise for the virtual root directory + const promise = this.uploadDirectory(destination, rootFolder, callback, client) + // Make sure to cancel it when requested + onCancel(() => promise.cancel()) + // await the uploads and resolve with "finished" status + const uploads = await promise + upload.status = UploadStatus.FINISHED + resolve(uploads) + } catch (error) { + if (isCancel(error) || error instanceof UploadCancelledError) { + logger.info('Upload cancelled by user', { error }) + upload.status = UploadStatus.CANCELLED + reject(new UploadCancelledError(error)) + } else { + logger.error('Error in batch upload', { error }) + upload.status = UploadStatus.FAILED + reject(error) + } + } finally { + // Upload queue is cleared when all the uploading jobs are done + // Meta upload unlike real uploading does not create a job + // Removing it manually here to make sure it is remove even when no uploading happened and there was nothing to finish + this._uploadQueue.splice(this._uploadQueue.indexOf(upload), 1) + this._notifyAll(upload) + this.updateStats() + } + }) + } + + /** + * Helper to create a directory wrapped inside an Upload class + * @param destination Destination where to create the directory + * @param directory The directory to create + * @param client The cached WebDAV client + */ + private createDirectory(destination: string, directory: Directory, client: WebDAVClient): PCancelable { + const folderPath = normalize(`${destination}/${directory.name}`).replace(/\/$/, '') + const rootPath = `${this.root.replace(/\/$/, '')}/${folderPath.replace(/^\//, '')}` + + if (!directory.name) { + throw new Error('Can not create empty directory') + } + + // Add a new upload to the upload queue + const currentUpload: Upload = new Upload(rootPath, false, 0, directory) + this._uploadQueue.push(currentUpload) + + // Return the cancelable promise + return new PCancelable(async (resolve, reject, onCancel) => { + 
const abort = new AbortController() + onCancel(() => abort.abort()) + currentUpload.signal.addEventListener('abort', () => reject(t('Upload has been cancelled'))) + + // Add the request to the job queue -> wait for finish to resolve the promise + await this._jobQueue.add(async () => { + currentUpload.status = UploadStatus.UPLOADING + try { + await client.createDirectory(folderPath, { signal: abort.signal }) + resolve(currentUpload) + } catch (error) { + if (isCancel(error) || error instanceof UploadCancelledError) { + currentUpload.status = UploadStatus.CANCELLED + reject(new UploadCancelledError(error)) + } else if (error && typeof error === 'object' && 'status' in error && error.status === 405) { + // Directory already exists, so just write into it and ignore the error + logger.debug('Directory already exists, writing into it', { directory: directory.name }) + currentUpload.status = UploadStatus.FINISHED + resolve(currentUpload) + } else { + // Another error happened, so abort uploading the directory + currentUpload.status = UploadStatus.FAILED + reject(error) + } + } finally { + // Update statistics + this._notifyAll(currentUpload) + this.updateStats() + } + }) + }) + } + + // Helper for uploading directories (recursively) + private uploadDirectory( + destination: string, + directory: Directory, + callback: (nodes: Array, currentPath: string) => Promise|false>, + // client as parameter to cache it for performance + client: WebDAVClient, + ): PCancelable { + const folderPath = normalize(`${destination}/${directory.name}`).replace(/\/$/, '') + + return new PCancelable(async (resolve, reject, onCancel) => { + const abort = new AbortController() + onCancel(() => abort.abort()) + + // Let the user handle conflicts + const selectedForUpload = await callback(directory.children, folderPath) + if (selectedForUpload === false) { + logger.debug('Upload canceled by user', { directory }) + reject(new UploadCancelledError('Conflict resolution cancelled by user')) + return + } 
else if (selectedForUpload.length === 0 && directory.children.length > 0) { + logger.debug('Skipping directory, as all files were skipped by user', { directory }) + resolve([]) + return + } + + const directories: PCancelable[] = [] + const uploads: PCancelable[] = [] + // Setup abort controller to cancel all child requests + abort.signal.addEventListener('abort', () => { + directories.forEach((upload) => upload.cancel()) + uploads.forEach((upload) => upload.cancel()) + }) + + logger.debug('Start directory upload', { directory }) + try { + if (directory.name) { + // If not the virtual root we need to create the directory first before uploading + // Make sure the promise is listed in the final result + uploads.push(this.createDirectory(destination, directory, client) as PCancelable) + // Ensure the directory is created before uploading / creating children + await uploads.at(-1) + } + + for (const node of selectedForUpload) { + if (node instanceof Directory) { + directories.push(this.uploadDirectory(folderPath, node, callback, client)) + } else { + uploads.push(this.upload(`${folderPath}/${node.name}`, node)) + } + } + + const resolvedUploads = await Promise.all(uploads) + const resolvedDirectoryUploads = await Promise.all(directories) + resolve([resolvedUploads, ...resolvedDirectoryUploads].flat()) + } catch (e) { + // Ensure a failure cancels all other requests + abort.abort(e) + reject(e) + } + }) + } + + /** + * Upload a file to the given path + * @param {string} destination the destination path relative to the root folder. e.g. 
/foo/bar.txt + * @param {File|FileSystemFileEntry} fileHandle the file to upload + * @param {string} root the root folder to upload to + * @param retries number of retries + */ + upload(destination: string, fileHandle: File|FileSystemFileEntry, root?: string, retries: number = 5): PCancelable { + root = root || this.root + const destinationPath = `${root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` + + // Get the encoded source url to this object for requests purposes + const { origin } = new URL(destinationPath) + const encodedDestinationFile = origin + encodePath(destinationPath.slice(origin.length)) + + this.eta.resume() + logger.debug(`Uploading ${fileHandle.name} to ${encodedDestinationFile}`) + + const promise = new PCancelable(async (resolve, reject, onCancel): Promise => { + // Handle file system entries by retrieving the file handle + if (isFileSystemFileEntry(fileHandle)) { + fileHandle = await new Promise((resolve) => (fileHandle as FileSystemFileEntry).file(resolve, reject)) + } + // We can cast here as we handled system entries in the if above + const file = fileHandle as File + + // @ts-expect-error TS2339 Object has no defined properties + const supportsPublicChunking = getCapabilities().dav?.public_shares_chunking ?? false + const maxChunkSize = getMaxChunksSize('size' in file ? 
file.size : undefined) + // If manually disabled or if the file is too small + const disabledChunkUpload = (this._isPublic && !supportsPublicChunking) + || maxChunkSize === 0 + || ('size' in file && file.size < maxChunkSize) + + const upload = new Upload(destinationPath, !disabledChunkUpload, file.size, file) + this._uploadQueue.push(upload) + this.updateStats() + + // Register cancellation caller + onCancel(upload.cancel) + + if (!disabledChunkUpload) { + logger.debug('Initializing chunked upload', { file, upload }) + + // Let's initialize a chunk upload + const tempUrl = await initChunkWorkspace(encodedDestinationFile, retries, this._isPublic, this._customHeaders) + const chunksQueue: Array> = [] + + // Generate chunks array + for (let chunk = 0; chunk < upload.chunks; chunk++) { + const bufferStart = chunk * maxChunkSize + // Don't go further than the file size + const bufferEnd = Math.min(bufferStart + maxChunkSize, upload.size) + // Make it a Promise function for better memory management + const blob = () => getChunk(file, bufferStart, maxChunkSize) + + // Init request queue + const request = () => { + // bytes uploaded on this chunk (as upload.uploaded tracks all chunks) + let chunkBytes = 0 + return uploadData( + `${tempUrl}/${chunk + 1}`, + blob, + { + signal: upload.signal, + destinationFile: encodedDestinationFile, + retries, + onUploadProgress: ({ bytes }) => { + // Only count 90% of bytes as the request is not yet processed by server + // we set the remaining 10% when the request finished (server responded). 
+ const progressBytes = bytes * 0.9 + chunkBytes += progressBytes + upload.uploaded += progressBytes + this.updateStats() + }, + onUploadRetry: () => { + // Current try failed, so reset the stats for this chunk + // meaning remove the uploaded chunk bytes from stats + upload.uploaded -= chunkBytes + chunkBytes = 0 + this.updateStats() + }, + headers: { + ...this._customHeaders, + ...this._mtimeHeader(file), + 'OC-Total-Length': file.size, + 'Content-Type': 'application/octet-stream', + }, + }, + ) + // Update upload progress on chunk completion + .then(() => { + // request fully done so we uploaded the full chunk + // we first remove the intermediate chunkBytes from progress events + // and then add the real full size + upload.uploaded += bufferEnd - bufferStart - chunkBytes + this.updateStats() + }) + .catch((error) => { + if (error?.response?.status === 507) { + logger.error('Upload failed, not enough space on the server or quota exceeded. Cancelling the remaining chunks', { error, upload }) + upload.cancel() + upload.status = UploadStatus.FAILED + throw error + } + + if (!isCancel(error)) { + logger.error(`Chunk ${chunk + 1} ${bufferStart} - ${bufferEnd} uploading failed`, { error, upload }) + upload.cancel() + upload.status = UploadStatus.FAILED + } + throw error + }) + } + chunksQueue.push(this._jobQueue.add(request)) + } + + const request = async () => { + try { + // Once all chunks are sent, assemble the final file + await Promise.all(chunksQueue) + + // Assemble the chunks + upload.status = UploadStatus.ASSEMBLING + this.updateStats() + + // Send the assemble request + upload.response = await axios.request({ + method: 'MOVE', + url: `${tempUrl}/.file`, + headers: { + ...this._customHeaders, + ...this._mtimeHeader(file), + 'OC-Total-Length': file.size, + Destination: encodedDestinationFile, + }, + }) + upload.status = UploadStatus.FINISHED + this.updateStats() + + logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) + resolve(upload) + } 
catch (error) { + if (isCancel(error) || error instanceof UploadCancelledError) { + upload.status = UploadStatus.CANCELLED + reject(new UploadCancelledError(error)) + } else { + upload.status = UploadStatus.FAILED + reject(t('Failed to assemble the chunks together')) + } + // Cleaning up temp directory + axios.request({ + method: 'DELETE', + url: `${tempUrl}`, + }) + } finally { + // Notify listeners of the upload completion + this._notifyAll(upload) + } + } + + this._jobQueue.add(request) + } else { + logger.debug('Initializing regular upload', { file, upload }) + + // Generating upload limit + const blob = await getChunk(file, 0, upload.size) + const request = async () => { + try { + upload.response = await uploadData( + encodedDestinationFile, + blob, + { + signal: upload.signal, + onUploadProgress: ({ bytes }) => { + // As this is only the sent bytes not the processed ones we only count 90%. + // When the upload is finished (server acknowledged the upload) the remaining 10% will be correctly set. 
+ upload.uploaded += bytes * 0.9 + this.updateStats() + }, + onUploadRetry: () => { + upload.uploaded = 0 + this.updateStats() + }, + headers: { + ...this._customHeaders, + ...this._mtimeHeader(file), + 'Content-Type': file.type, + }, + }, + ) + + // Update progress - now we set the uploaded size to 100% of the file size + upload.uploaded = upload.size + this.updateStats() + + // Resolve + logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) + resolve(upload) + } catch (error) { + if (isCancel(error) || error instanceof UploadCancelledError) { + upload.status = UploadStatus.CANCELLED + reject(new UploadCancelledError(error)) + return + } + + // Attach response to the upload object + if ((error as AxiosError)?.response) { + upload.response = (error as AxiosError).response as AxiosResponse + } + + upload.status = UploadStatus.FAILED + logger.error(`Failed uploading ${file.name}`, { error, file, upload }) + reject(t('Failed to upload the file')) + } + + // Notify listeners of the upload completion + this._notifyAll(upload) + } + this._jobQueue.add(request) + this.updateStats() + } + + // Reset when upload queue is done + // Only when we know we're closing on the last chunks + // and/or assembling we can reset the uploader. + // Otherwise he queue might be idle for a short time + // and clear the Upload queue before we're done. + this._jobQueue.onIdle() + .then(() => this.reset()) + + // Finally return the Upload + return upload + }) as PCancelable + + return promise + } + + /** + * Create modification time headers if valid value is available. + * It can be invalid on Android devices if SD cards with NTFS / FAT are used, + * as those files might use the NT epoch for time so the value will be negative. 
+ * + * @param file The file to upload + */ + private _mtimeHeader(file: File): { 'X-OC-Mtime'?: number } { + const mtime = Math.floor(file.lastModified / 1000) + if (mtime > 0) { + return { 'X-OC-Mtime': mtime } + } + return {} + } + +} diff --git a/lib/upload/utils/config.ts b/lib/upload/utils/config.ts new file mode 100644 index 00000000..587feeda --- /dev/null +++ b/lib/upload/utils/config.ts @@ -0,0 +1,25 @@ +/*! + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +export const getMaxChunksSize = function(fileSize: number | undefined = undefined): number { + const maxChunkSize = window.OC?.appConfig?.files?.max_chunk_size + if (maxChunkSize <= 0) { + return 0 + } + + // If invalid return default + if (!Number(maxChunkSize)) { + return 10 * 1024 * 1024 + } + + // v2 of chunked upload requires chunks to be 5 MB at minimum + const minimumChunkSize = Math.max(Number(maxChunkSize), 5 * 1024 * 1024) + + if (fileSize === undefined) { + return minimumChunkSize + } + + // Adapt chunk size to fit the file in 10000 chunks for chunked upload v2 + return Math.max(minimumChunkSize, Math.ceil(fileSize / 10000)) +} diff --git a/lib/upload/utils/conflicts.ts b/lib/upload/utils/conflicts.ts new file mode 100644 index 00000000..dc3d554d --- /dev/null +++ b/lib/upload/utils/conflicts.ts @@ -0,0 +1,32 @@ +/** + * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { Node } from '@nextcloud/files' + +/** + * Check if there is a conflict between two sets of files + * @param {Array} files the incoming files + * @param {Node[]} content all the existing files in the directory + * @return {boolean} true if there is a conflict + */ +export function hasConflict(files: (File|FileSystemEntry|Node)[], content: Node[]): boolean { + return getConflicts(files, content).length > 0 +} + +/** + * Get the conflicts between two sets of files 
+ * @param {Array} files the incoming files + * @param {Node[]} content all the existing files in the directory + * @return {boolean} true if there is a conflict + */ +export function getConflicts(files: T[], content: Node[]): T[] { + const contentNames = content.map((node: Node) => node.basename) + const conflicts = files.filter((node: File|FileSystemEntry|Node) => { + const name = 'basename' in node ? node.basename : node.name + return contentNames.indexOf(name) !== -1 + }) + + return conflicts +} diff --git a/lib/upload/utils/fileTree.ts b/lib/upload/utils/fileTree.ts new file mode 100644 index 00000000..e11aaeda --- /dev/null +++ b/lib/upload/utils/fileTree.ts @@ -0,0 +1,127 @@ +/** + * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +/** + * Helpers to generate a file tree when the File and Directory API is used (e.g. Drag and Drop or ) + */ + +import { basename } from '@nextcloud/paths' +import { isFileSystemDirectoryEntry, isFileSystemFileEntry } from './filesystem.ts' + +/** + * This is a helper class to allow building a file tree for uploading + * It allows to create virtual directories + */ +export class Directory extends File { + + private _originalName: string + private _path: string + private _children: Map + + constructor(path: string) { + super([], basename(path), { type: 'httpd/unix-directory', lastModified: 0 }) + this._children = new Map() + this._originalName = basename(path) + this._path = path + } + + get size(): number { + return this.children.reduce((sum, file) => sum + file.size, 0) + } + + get lastModified(): number { + return this.children.reduce((latest, file) => Math.max(latest, file.lastModified), 0) + } + + // We need this to keep track of renamed files + get originalName(): string { + return this._originalName + } + + get children(): Array { + return Array.from(this._children.values()) + } + + get webkitRelativePath(): string { + return this._path + } + + 
getChild(name: string): File|Directory|null { + return this._children.get(name) ?? null + } + + /** + * Add multiple children at once + * @param files The files to add + */ + async addChildren(files: Array): Promise { + for (const file of files) { + await this.addChild(file) + } + } + + /** + * Add a child to the directory. + * If it is a nested child the parents will be created if not already exist. + * @param file The child to add + */ + async addChild(file: File|FileSystemEntry) { + const rootPath = this._path && `${this._path}/` + if (isFileSystemFileEntry(file)) { + file = await new Promise((resolve, reject) => (file as FileSystemFileEntry).file(resolve, reject)) + } else if (isFileSystemDirectoryEntry(file)) { + const reader = file.createReader() + const entries = await new Promise((resolve, reject) => reader.readEntries(resolve, reject)) + + // Create a new child directory and add the entries + const child = new Directory(`${rootPath}${file.name}`) + await child.addChildren(entries) + this._children.set(file.name, child) + return + } + + // Make Typescript calm - we ensured it is not a file system entry above. + file = file as File + + const filePath = file.webkitRelativePath ?? 
file.name + // Handle plain files + if (!filePath.includes('/')) { + // Direct child of the directory + this._children.set(file.name, file) + } else { + // Check if file is a child + if (!filePath.startsWith(this._path)) { + throw new Error(`File ${filePath} is not a child of ${this._path}`) + } + + // If file is a child check if we need to nest it + const relPath = filePath.slice(rootPath.length) + const name = basename(relPath) + + if (name === relPath) { + // It is a direct child so we can add it + this._children.set(name, file) + } else { + // It is not a direct child so we need to create intermediate nodes + const base = relPath.slice(0, relPath.indexOf('/')) + if (this._children.has(base)) { + // It is a grandchild so we can add it directly + await (this._children.get(base) as Directory).addChild(file) + } else { + // We do not know any parent of that child + // so we need to add a new child on the current level + const child = new Directory(`${rootPath}${base}`) + await child.addChild(file) + this._children.set(base, child) + } + } + } + } + +} + +/** + * Interface of the internal Directory class + */ +export type IDirectory = Pick diff --git a/lib/upload/utils/filesystem.ts b/lib/upload/utils/filesystem.ts new file mode 100644 index 00000000..0602cc49 --- /dev/null +++ b/lib/upload/utils/filesystem.ts @@ -0,0 +1,12 @@ +/*! 
+ * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +// Helpers for the File and Directory API + +// Helper to support browser that do not support the API +export const isFileSystemDirectoryEntry = (o: unknown): o is FileSystemDirectoryEntry => 'FileSystemDirectoryEntry' in window && o instanceof FileSystemDirectoryEntry + +export const isFileSystemFileEntry = (o: unknown): o is FileSystemFileEntry => 'FileSystemFileEntry' in window && o instanceof FileSystemFileEntry + +export const isFileSystemEntry = (o: unknown): o is FileSystemEntry => 'FileSystemEntry' in window && o instanceof FileSystemEntry diff --git a/lib/upload/utils/l10n.ts b/lib/upload/utils/l10n.ts new file mode 100644 index 00000000..49c9443b --- /dev/null +++ b/lib/upload/utils/l10n.ts @@ -0,0 +1,36 @@ +/** + * SPDX-FileCopyrightText: 2023 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +import { getGettextBuilder } from '@nextcloud/l10n/gettext' + +const gtBuilder = getGettextBuilder() + .detectLocale() + +// @ts-expect-error __TRANSLATIONS__ is replaced by vite +__TRANSLATIONS__.map(data => gtBuilder.addTranslation(data.locale, data.json)) + +interface Gettext { + /** + * Get translated string (singular form), optionally with placeholders + * + * @param original original string to translate + * @param placeholders map of placeholder key to value + */ + gettext(original: string, placeholders?: Record): string + + /** + * Get translated string with plural forms + * + * @param singular Singular text form + * @param plural Plural text form to be used if `count` requires it + * @param count The number to insert into the text + * @param placeholders optional map of placeholder key to value + */ + ngettext(singular: string, plural: string, count: number, placeholders?: Record): string +} + +const gt = gtBuilder.build() as Gettext + +export const n = gt.ngettext.bind(gt) as typeof 
gt.ngettext +export const t = gt.gettext.bind(gt) as typeof gt.gettext diff --git a/lib/upload/utils/logger.ts b/lib/upload/utils/logger.ts new file mode 100644 index 00000000..1c50c431 --- /dev/null +++ b/lib/upload/utils/logger.ts @@ -0,0 +1,11 @@ +/** + * SPDX-FileCopyrightText: 2019 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { getLoggerBuilder } from '@nextcloud/logger' + +export default getLoggerBuilder() + .setApp('@nextcloud/upload') + .detectUser() + .build() diff --git a/lib/upload/utils/upload.ts b/lib/upload/utils/upload.ts new file mode 100644 index 00000000..869ad295 --- /dev/null +++ b/lib/upload/utils/upload.ts @@ -0,0 +1,150 @@ +/** + * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { AxiosError, AxiosProgressEvent, AxiosResponse } from 'axios' + +import { getCurrentUser } from '@nextcloud/auth' +import axios from '@nextcloud/axios' +import { getSharingToken } from '@nextcloud/sharing/public' +import axiosRetry, { exponentialDelay, isNetworkOrIdempotentRequestError } from 'axios-retry' +import logger from './logger.ts' + +axiosRetry(axios, { retries: 0 }) + +type UploadData = Blob | (() => Promise) + +interface UploadDataOptions { + /** The abort signal */ + signal: AbortSignal + /** Upload progress event callback */ + onUploadProgress?: (event: AxiosProgressEvent) => void + /** Request retry callback (e.g. 
network error of previous try) */ + onUploadRetry?: () => void + /** The final destination file (for chunked uploads) */ + destinationFile?: string + /** Additional headers */ + headers?: Record + /** Number of retries */ + retries?: number, +} + +/** + * Upload some data to a given path + * @param url the url to upload to + * @param uploadData the data to upload + * @param uploadOptions upload options + */ +export async function uploadData( + url: string, + uploadData: UploadData, + uploadOptions: UploadDataOptions, +): Promise { + const options = { + headers: {}, + onUploadProgress: () => {}, + onUploadRetry: () => {}, + retries: 5, + ...uploadOptions, + } + + let data: Blob + + // If the upload data is a blob, we can directly use it + // Otherwise, we need to wait for the promise to resolve + if (uploadData instanceof Blob) { + data = uploadData + } else { + data = await uploadData() + } + + // Helps the server to know what to do with the file afterwards (e.g. chunked upload) + if (options.destinationFile) { + options.headers.Destination = options.destinationFile + } + + // If no content type is set, we default to octet-stream + if (!options.headers['Content-Type']) { + options.headers['Content-Type'] = 'application/octet-stream' + } + + return await axios.request({ + method: 'PUT', + url, + data, + signal: options.signal, + onUploadProgress: options.onUploadProgress, + headers: options.headers, + 'axios-retry': { + retries: options.retries, + retryDelay: (retryCount: number, error: AxiosError) => exponentialDelay(retryCount, error, 1000), + retryCondition(error: AxiosError): boolean { + // Do not retry on insufficient storage - this is permanent + if (error.status === 507) { + return false + } + // Do a retry on locked error as this is often just some preview generation + if (error.status === 423) { + return true + } + // Otherwise fallback to default behavior + return isNetworkOrIdempotentRequestError(error) + }, + onRetry: options.onUploadRetry, + }, + }) +} 
+ +/** + * Get chunk of the file. + * Doing this on the fly give us a big performance boost and proper garbage collection + * @param file File to upload + * @param start Offset to start upload + * @param length Size of chunk to upload + */ +export const getChunk = function(file: File, start: number, length: number): Promise { + if (start === 0 && file.size <= length) { + return Promise.resolve(new Blob([file], { type: file.type || 'application/octet-stream' })) + } + + return Promise.resolve(new Blob([file.slice(start, start + length)], { type: 'application/octet-stream' })) +} + +/** + * Create a temporary upload workspace to upload the chunks to + * @param destinationFile The file name after finishing the chunked upload + * @param retries number of retries + * @param isPublic whether this upload is in a public share or not + * @param customHeaders Custom HTTP headers used when creating the workspace (e.g. X-NC-Nickname for file drops) + */ +export const initChunkWorkspace = async function(destinationFile: string | undefined = undefined, retries: number = 5, isPublic: boolean = false, customHeaders: Record = {}): Promise { + let chunksWorkspace: string + if (isPublic) { + chunksWorkspace = `${getBaseUrl()}/public.php/dav/uploads/${getSharingToken()}` + } else { + chunksWorkspace = generateRemoteUrl(`dav/uploads/${getCurrentUser()?.uid}`) + } + + const hash = [...Array(16)].map(() => Math.floor(Math.random() * 16).toString(16)).join('') + const tempWorkspace = `web-file-upload-${hash}` + const url = `${chunksWorkspace}/${tempWorkspace}` + const headers = customHeaders + if (destinationFile) { + headers.Destination = destinationFile + } + + await axios.request({ + method: 'MKCOL', + url, + headers, + 'axios-retry': { + retries, + retryDelay: (retryCount: number, error: AxiosError) => exponentialDelay(retryCount, error, 1000), + }, + }) + + logger.debug('Created temporary upload workspace', { url }) + + return url +} diff --git a/lib/upload/window.d.ts 
b/lib/upload/window.d.ts new file mode 100644 index 00000000..bac3a431 --- /dev/null +++ b/lib/upload/window.d.ts @@ -0,0 +1,21 @@ +/// +/** + * SPDX-FileCopyrightText: 2023 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ +import type { Uploader } from './lib/uploader' + +// This is for private use only +declare global { + interface Window { + _nc_uploader?: Uploader + + OC: Nextcloud.v28.OC & { + appConfig: { + files: { + max_chunk_size: number + } + } + } + } +} diff --git a/package-lock.json b/package-lock.json index 251bce25..da28ed4c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,13 +10,16 @@ "license": "AGPL-3.0-or-later", "dependencies": { "@nextcloud/auth": "^2.5.3", + "@nextcloud/axios": "^2.5.2", "@nextcloud/capabilities": "^1.2.1", "@nextcloud/l10n": "^3.4.1", "@nextcloud/logger": "^3.0.3", "@nextcloud/paths": "^3.0.0", "@nextcloud/router": "^3.1.0", "@nextcloud/sharing": "^0.4.0", + "axios-retry": "^4.5.0", "is-svg": "^6.1.0", + "p-queue": "^9.1.0", "typescript-event-target": "^1.1.2", "webdav": "^5.9.0" }, @@ -202,6 +205,7 @@ "integrity": "sha512-lWBYIrF7qK5+GjY5Uy+/hEgp8OJWOD/rpy74GplYRhEauvbHDeFB8t5hPOZxCZ0Oxf4Cc36tK51/l3ymJysrKw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.26.2", @@ -545,6 +549,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=20.19.0" }, @@ -585,6 +590,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=20.19.0" } @@ -1127,7 +1133,6 @@ "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", "dev": true, "license": "Apache-2.0", - "peer": true, "dependencies": { "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", @@ -1143,7 +1148,6 @@ "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", "dev": true, "license": "Apache-2.0", - "peer": true, 
"dependencies": { "@eslint/core": "^0.17.0" }, @@ -1170,7 +1174,6 @@ "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", @@ -1195,7 +1198,6 @@ "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=18" }, @@ -1209,7 +1211,6 @@ "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, @@ -1239,7 +1240,6 @@ "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } @@ -1316,7 +1316,6 @@ "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=18.18.0" } @@ -1327,7 +1326,6 @@ "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", "dev": true, "license": "Apache-2.0", - "peer": true, "dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.4.0" @@ -1342,7 +1340,6 @@ "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=12.22" }, @@ -1367,7 +1364,6 @@ "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", "dev": true, "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=18.18" }, @@ -1606,6 +1602,20 @@ "node": "^20.0.0 || ^22.0.0 || ^24.0.0" } }, + 
"node_modules/@nextcloud/axios": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/@nextcloud/axios/-/axios-2.5.2.tgz", + "integrity": "sha512-8frJb77jNMbz00TjsSqs1PymY0nIEbNM4mVmwen2tXY7wNgRai6uXilIlXKOYB9jR/F/HKRj6B4vUwVwZbhdbw==", + "license": "GPL-3.0-or-later", + "dependencies": { + "@nextcloud/auth": "^2.5.1", + "@nextcloud/router": "^3.0.1", + "axios": "^1.12.2" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || ^24.0.0" + } + }, "node_modules/@nextcloud/browser-storage": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/@nextcloud/browser-storage/-/browser-storage-0.5.0.tgz", @@ -1908,6 +1918,7 @@ "integrity": "sha512-dKYCMuPO1bmrpuogcjQ8z7ICCH3FP6WmxpwC03yjzGfZhj9fTJg6+bS1+UAplekbN2C+M61UNllGOOoAfGCrdQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@octokit/auth-token": "^4.0.0", "@octokit/graphql": "^7.1.0", @@ -2072,7 +2083,6 @@ "hasInstallScript": true, "license": "MIT", "optional": true, - "peer": true, "dependencies": { "detect-libc": "^1.0.3", "is-glob": "^4.0.3", @@ -2115,7 +2125,6 @@ "os": [ "android" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2137,7 +2146,6 @@ "os": [ "darwin" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2159,7 +2167,6 @@ "os": [ "darwin" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2181,7 +2188,6 @@ "os": [ "freebsd" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2203,7 +2209,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2225,7 +2230,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2247,7 +2251,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2269,7 +2272,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2291,7 +2293,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2313,7 +2314,6 @@ "os": [ "linux" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2335,7 
+2335,6 @@ "os": [ "win32" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2357,7 +2356,6 @@ "os": [ "win32" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2379,7 +2377,6 @@ "os": [ "win32" ], - "peer": true, "engines": { "node": ">= 10.0.0" }, @@ -2812,6 +2809,7 @@ "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.3", "json-schema-traverse": "^1.0.0", @@ -3120,6 +3118,7 @@ "integrity": "sha512-m0jEgYlYz+mDJZ2+F4v8D1AyQb+QzsNqRuI7xg1VQX/KlKS0qT9r1Mo16yo5F/MtifXFgaofIFsdFMox2SxIbQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.16.0" } @@ -3194,6 +3193,7 @@ "integrity": "sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.54.0", "@typescript-eslint/types": "8.54.0", @@ -3641,7 +3641,6 @@ "integrity": "sha512-5aBjvGqsWs+MoxswZPoTB9nSDb3dhd1x30xrrltKujlCxo48j8HGDNj3QPhF4VIS0VQDUrA1xUfp2hEa+FNyXA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@babel/parser": "^7.28.0", "@vue/compiler-core": "3.5.18", @@ -3660,7 +3659,6 @@ "integrity": "sha512-xM16Ak7rSWHkM3m22NlmcdIM+K4BMyFARAfV9hYFl+SFuRzrZ3uGMNW05kA5pmeMa0X9X963Kgou7ufdbpOP9g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/compiler-dom": "3.5.18", "@vue/shared": "3.5.18" @@ -3734,7 +3732,6 @@ "integrity": "sha512-x0vPO5Imw+3sChLM5Y+B6G1zPjwdOri9e8V21NnTnlEvkxatHEH5B5KEAJcjuzQ7BsjGrKtfzuQ5eQwXh8HXBg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/shared": "3.5.18" } @@ -3745,7 +3742,6 @@ "integrity": "sha512-DUpHa1HpeOQEt6+3nheUfqVXRog2kivkXHUhoqJiKR33SO4x+a5uNOMkV487WPerQkL0vUuRvq/7JhRgLW3S+w==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/reactivity": 
"3.5.18", "@vue/shared": "3.5.18" @@ -3757,7 +3753,6 @@ "integrity": "sha512-YwDj71iV05j4RnzZnZtGaXwPoUWeRsqinblgVJwR8XTXYZ9D5PbahHQgsbmzUvCWNF6x7siQ89HgnX5eWkr3mw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/reactivity": "3.5.18", "@vue/runtime-core": "3.5.18", @@ -3771,7 +3766,6 @@ "integrity": "sha512-PvIHLUoWgSbDG7zLHqSqaCoZvHi6NNmfVFOqO+OnwvqMz/tqQr3FuGWS8ufluNddk7ZLBJYMrjcw1c6XzR12mA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vue/compiler-ssr": "3.5.18", "@vue/shared": "3.5.18" @@ -3793,6 +3787,7 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -3825,7 +3820,6 @@ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -3971,6 +3965,12 @@ "node": ">=12" } }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, "node_modules/available-typed-arrays": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", @@ -3987,6 +3987,30 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/axios": { + "version": "1.13.5", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz", + "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==", + "license": "MIT", + "peer": true, + "dependencies": { + "follow-redirects": "^1.15.11", + "form-data": "^4.0.5", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/axios-retry": { + "version": 
"4.5.0", + "resolved": "https://registry.npmjs.org/axios-retry/-/axios-retry-4.5.0.tgz", + "integrity": "sha512-aR99oXhpEDGo0UuAlYcn2iGRds30k366Zfa05XWScR9QaQD4JYiP3/1Qt1u7YlefUOK+cn0CcwoL1oefavQUlQ==", + "license": "Apache-2.0", + "dependencies": { + "is-retry-allowed": "^2.2.0" + }, + "peerDependencies": { + "axios": "0.x || 1.x" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -4055,7 +4079,6 @@ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -4067,7 +4090,6 @@ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "optional": true, - "peer": true, "dependencies": { "fill-range": "^7.1.1" }, @@ -4244,6 +4266,7 @@ "url": "https://github.com/sponsors/ai" } ], + "peer": true, "dependencies": { "caniuse-lite": "^1.0.30001688", "electron-to-chromium": "^1.5.73", @@ -4342,7 +4365,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0", @@ -4375,7 +4397,6 @@ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=6" } @@ -4462,7 +4483,6 @@ "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "readdirp": "^4.0.1" }, @@ -4508,6 +4528,18 @@ "dev": true, "license": "MIT" }, + "node_modules/combined-stream": { + "version": "1.0.8", 
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, "node_modules/comment-parser": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/comment-parser/-/comment-parser-1.4.5.tgz", @@ -4537,8 +4569,7 @@ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/confbox": { "version": "0.2.2", @@ -4646,7 +4677,6 @@ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -4756,8 +4786,7 @@ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/data-uri-to-buffer": { "version": "4.0.1", @@ -4818,8 +4847,7 @@ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/define-data-property": { "version": "1.1.4", @@ -4855,6 +4883,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": 
"sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/deprecation": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", @@ -4893,7 +4930,6 @@ "dev": true, "license": "Apache-2.0", "optional": true, - "peer": true, "bin": { "detect-libc": "bin/detect-libc.js" }, @@ -4959,7 +4995,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.1", @@ -5015,7 +5050,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -5025,7 +5059,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "dev": true, "engines": { "node": ">= 0.4" } @@ -5041,7 +5074,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0" @@ -5050,6 +5082,21 @@ "node": ">= 0.4" } }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + 
"dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/esbuild": { "version": "0.25.3", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.3.tgz", @@ -5360,7 +5407,6 @@ "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", "dev": true, "license": "BSD-2-Clause", - "peer": true, "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" @@ -5422,7 +5468,6 @@ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "license": "BSD-2-Clause", - "peer": true, "dependencies": { "estraverse": "^5.2.0" }, @@ -5462,11 +5507,16 @@ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "dev": true, "license": "BSD-2-Clause", - "peer": true, "engines": { "node": ">=0.10.0" } }, + "node_modules/eventemitter3": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz", + "integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==", + "license": "MIT" + }, "node_modules/events": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", @@ -5516,16 +5566,14 @@ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", "dev": true, - "license": "MIT", 
- "peer": true + "license": "MIT" }, "node_modules/fast-uri": { "version": "3.0.6", @@ -5590,7 +5638,6 @@ "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "flat-cache": "^4.0.0" }, @@ -5604,7 +5651,6 @@ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, "optional": true, - "peer": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -5634,7 +5680,6 @@ "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" @@ -5648,8 +5693,27 @@ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", "dev": true, - "license": "ISC", - "peer": true + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } }, "node_modules/for-each": { "version": "0.3.5", @@ -5667,6 +5731,22 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": 
"^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/formdata-polyfill": { "version": "4.0.10", "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", @@ -5711,7 +5791,6 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "dev": true, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -5739,7 +5818,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", @@ -5764,7 +5842,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "dev": true, "license": "MIT", "dependencies": { "dunder-proto": "^1.0.1", @@ -5790,7 +5867,6 @@ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "is-glob": "^4.0.3" }, @@ -5812,7 +5888,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -5865,7 +5940,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "dev": true, "license": "MIT", 
"engines": { "node": ">= 0.4" @@ -5878,7 +5952,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "dev": true, "license": "MIT", "dependencies": { "has-symbols": "^1.0.3" @@ -5919,7 +5992,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "dev": true, "dependencies": { "function-bind": "^1.1.2" }, @@ -6051,7 +6123,6 @@ "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">= 4" } @@ -6061,8 +6132,7 @@ "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.1.4.tgz", "integrity": "sha512-p6u1bG3YSnINT5RQmx/yRZBpenIl30kVxkTLDyHLIMk0gict704Q9n+thfDI7lTRm9vXdDYutVzXhzcThxTnXA==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/import-fresh": { "version": "3.3.1", @@ -6070,7 +6140,6 @@ "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -6098,7 +6167,6 @@ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=0.8.19" } @@ -6160,7 +6228,6 @@ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, - "peer": true, "engines": { "node": ">=0.10.0" } @@ -6190,7 +6257,6 @@ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", 
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, - "peer": true, "dependencies": { "is-extglob": "^2.1.1" }, @@ -6221,7 +6287,6 @@ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, "optional": true, - "peer": true, "engines": { "node": ">=0.12.0" } @@ -6265,6 +6330,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-retry-allowed": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-2.2.0.tgz", + "integrity": "sha512-XVm7LOeLpTW4jV19QSH38vkswxoLud8sQ57YwJVTPWdiaI9I8keEhGFpBlslyVsgdQy4Opg8QOLb8YRgsyZiQg==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-svg": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-6.1.0.tgz", @@ -6307,8 +6384,7 @@ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true, - "license": "ISC", - "peer": true + "license": "ISC" }, "node_modules/isomorphic-timers-promises": { "version": "1.0.1", @@ -6419,7 +6495,6 @@ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "argparse": "^2.0.1" }, @@ -6514,24 +6589,21 @@ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/json5": { "version": "2.2.3", @@ -6564,7 +6636,6 @@ "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "json-buffer": "3.0.1" } @@ -6649,8 +6720,7 @@ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/lru-cache": { "version": "5.1.1", @@ -6740,7 +6810,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -6800,7 +6869,6 @@ "dev": true, "license": "MIT", "optional": true, - "peer": true, "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" @@ -6830,6 +6898,27 @@ "dev": true, "license": "MIT" }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + 
"version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, "node_modules/minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", @@ -6850,7 +6939,6 @@ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -6960,8 +7048,7 @@ "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", "dev": true, "license": "MIT", - "optional": true, - "peer": true + "optional": true }, "node_modules/node-domexception": { "version": "1.0.0", @@ -7153,7 +7240,6 @@ "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", @@ -7203,6 +7289,34 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/p-queue": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-9.1.0.tgz", + "integrity": "sha512-O/ZPaXuQV29uSLbxWBGGZO1mCQXV2BLIwUr59JUU9SoH76mnYvtms7aafH/isNSNGwuEfP6W/4xD0/TJXxrizw==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^5.0.1", + "p-timeout": "^7.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-timeout": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-7.0.1.tgz", + "integrity": 
"sha512-AxTM2wDGORHGEkPCt8yqxOTMgpfbEHqF51f/5fJCmwFC3C/zNcGT63SymH2ttOAaiIws2zVg4+izQCjrakcwHg==", + "license": "MIT", + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/package-name-regex": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/package-name-regex/-/package-name-regex-2.0.6.tgz", @@ -7229,7 +7343,6 @@ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "callsites": "^3.0.0" }, @@ -7318,7 +7431,6 @@ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=8" } @@ -7371,7 +7483,6 @@ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true, "optional": true, - "peer": true, "engines": { "node": ">=8.6" }, @@ -7484,6 +7595,12 @@ "dev": true, "license": "MIT" }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, "node_modules/public-encrypt": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", @@ -7613,7 +7730,6 @@ "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">= 14.18.0" }, @@ -7673,7 +7789,6 @@ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=4" } @@ -7761,6 +7876,7 @@ "integrity": 
"sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@types/estree": "1.0.8" }, @@ -8032,7 +8148,6 @@ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "shebang-regex": "^3.0.0" }, @@ -8046,7 +8161,6 @@ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=8" } @@ -8468,6 +8582,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8526,7 +8641,6 @@ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, "optional": true, - "peer": true, "dependencies": { "is-number": "^7.0.0" }, @@ -8706,6 +8820,7 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -8911,6 +9026,7 @@ "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", @@ -9524,6 +9640,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -9537,6 +9654,7 @@ "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/expect": "4.0.18", 
"@vitest/mocker": "4.0.18", @@ -9665,7 +9783,6 @@ "integrity": "sha512-CydUvFOQKD928UzZhTp4pr2vWz1L+H99t7Pkln2QSPdvmURT0MoC4wUccfCnuEaihNsu9aYYyk+bep8rlfkUXw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "debug": "^4.4.0", "eslint-scope": "^8.2.0", @@ -9690,7 +9807,6 @@ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", "dev": true, "license": "ISC", - "peer": true, "bin": { "semver": "bin/semver.js" }, @@ -9835,7 +9951,6 @@ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "isexe": "^2.0.0" }, @@ -9891,7 +10006,6 @@ "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=0.10.0" } diff --git a/package.json b/package.json index 609b37fd..3a44bb1b 100644 --- a/package.json +++ b/package.json @@ -50,13 +50,16 @@ }, "dependencies": { "@nextcloud/auth": "^2.5.3", + "@nextcloud/axios": "^2.5.2", "@nextcloud/capabilities": "^1.2.1", "@nextcloud/l10n": "^3.4.1", "@nextcloud/logger": "^3.0.3", "@nextcloud/paths": "^3.0.0", "@nextcloud/router": "^3.1.0", "@nextcloud/sharing": "^0.4.0", + "axios-retry": "^4.5.0", "is-svg": "^6.1.0", + "p-queue": "^9.1.0", "typescript-event-target": "^1.1.2", "webdav": "^5.9.0" }, From 54c1c69802a9706a46c3be127f83070ae712bb64 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 00:32:18 +0100 Subject: [PATCH 02/14] refactor(upload): resolve linter issues (`npm run lint:fix`) Signed-off-by: Ferdinand Thiessen --- lib/upload/errors/UploadCancelledError.ts | 2 - lib/upload/getUploader.ts | 1 - lib/upload/index.ts | 4 +- lib/upload/upload.ts | 9 ++-- lib/upload/uploader/eta.ts | 5 +-- lib/upload/uploader/index.ts | 1 + lib/upload/uploader/uploader.ts | 54 ++++++++++++----------- 
lib/upload/utils/config.ts | 8 +++- lib/upload/utils/conflicts.ts | 20 +++++---- lib/upload/utils/fileTree.ts | 14 +++--- lib/upload/utils/l10n.ts | 2 +- lib/upload/utils/upload.ts | 14 +++--- lib/upload/window.d.ts | 4 +- 13 files changed, 74 insertions(+), 64 deletions(-) diff --git a/lib/upload/errors/UploadCancelledError.ts b/lib/upload/errors/UploadCancelledError.ts index 8b0ad994..b2de557a 100644 --- a/lib/upload/errors/UploadCancelledError.ts +++ b/lib/upload/errors/UploadCancelledError.ts @@ -5,9 +5,7 @@ import { t } from '../utils/l10n.ts' export class UploadCancelledError extends Error { - public constructor(cause?: unknown) { super(t('Upload has been cancelled'), { cause }) } - } diff --git a/lib/upload/getUploader.ts b/lib/upload/getUploader.ts index 0d810cda..19505a28 100644 --- a/lib/upload/getUploader.ts +++ b/lib/upload/getUploader.ts @@ -4,7 +4,6 @@ */ import { isPublicShare } from '@nextcloud/sharing/public' - import { Uploader } from './uploader/uploader.ts' /** diff --git a/lib/upload/index.ts b/lib/upload/index.ts index c429cfbb..365c0358 100644 --- a/lib/upload/index.ts +++ b/lib/upload/index.ts @@ -4,9 +4,9 @@ */ export type { Eta, EtaEventsMap } from './uploader/index.ts' -export type { IDirectory, Directory } from './utils/fileTree.ts' +export type { Directory, IDirectory } from './utils/fileTree.ts' export { getUploader, upload } from './getUploader.ts' export { Upload, Status as UploadStatus } from './upload.ts' -export { Uploader, UploaderStatus, EtaStatus } from './uploader/index.ts' +export { EtaStatus, Uploader, UploaderStatus } from './uploader/index.ts' export { getConflicts, hasConflict } from './utils/conflicts.ts' diff --git a/lib/upload/upload.ts b/lib/upload/upload.ts index 59dfea2a..47366e91 100644 --- a/lib/upload/upload.ts +++ b/lib/upload/upload.ts @@ -3,6 +3,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ import type { AxiosResponse } from 'axios' + import { getMaxChunksSize } from './utils/config.js' export enum 
Status { @@ -14,7 +15,6 @@ export enum Status { FAILED = 5, } export class Upload { - private _source: string private _file: File private _isChunked: boolean @@ -26,7 +26,7 @@ export class Upload { private _status: Status = Status.INITIALIZED private _controller: AbortController - private _response: AxiosResponse|null = null + private _response: AxiosResponse | null = null constructor(source: string, chunked = false, size: number, file: File) { const chunks = Math.min(getMaxChunksSize() > 0 ? Math.ceil(size / getMaxChunksSize()) : 1, 10000) @@ -62,11 +62,11 @@ export class Upload { return this._startTime } - set response(response: AxiosResponse|null) { + set response(response: AxiosResponse | null) { this._response = response } - get response(): AxiosResponse|null { + get response(): AxiosResponse | null { return this._response } @@ -120,5 +120,4 @@ export class Upload { this._controller.abort() this._status = Status.CANCELLED } - } diff --git a/lib/upload/uploader/eta.ts b/lib/upload/uploader/eta.ts index b12dc57a..c7fd4f83 100644 --- a/lib/upload/uploader/eta.ts +++ b/lib/upload/uploader/eta.ts @@ -3,9 +3,9 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +import { formatFileSize } from '@nextcloud/files' import { TypedEventTarget } from 'typescript-event-target' import { n, t } from '../utils/l10n.ts' -import { formatFileSize } from '@nextcloud/files' export enum EtaStatus { Idle = 0, @@ -30,7 +30,6 @@ export interface EtaEventsMap { } export class Eta extends TypedEventTarget { - /** Bytes done */ private _done: number = 0 /** Total bytes to do */ @@ -69,6 +68,7 @@ export class Eta extends TypedEventTarget { /** * Add more transferred bytes. + * * @param done Additional bytes done. */ public add(done: number): void { @@ -221,5 +221,4 @@ export class Eta extends TypedEventTarget { ? 
`${formatFileSize(this._speed, true)}∕s` : '' } - } diff --git a/lib/upload/uploader/index.ts b/lib/upload/uploader/index.ts index 1111408d..ba0e2d34 100644 --- a/lib/upload/uploader/index.ts +++ b/lib/upload/uploader/index.ts @@ -6,6 +6,7 @@ export { type Eta, type EtaEventsMap, + EtaStatus, } from './eta.ts' diff --git a/lib/upload/uploader/uploader.ts b/lib/upload/uploader/uploader.ts index 0f96c43e..16db5ed9 100644 --- a/lib/upload/uploader/uploader.ts +++ b/lib/upload/uploader/uploader.ts @@ -7,33 +7,30 @@ import type { WebDAVClient } from 'webdav' import type { IDirectory } from '../utils/fileTree.ts' import { getCurrentUser } from '@nextcloud/auth' -import { FileType, Folder, Permission, davGetClient, davRemoteURL, davRootPath } from '@nextcloud/files' -import { encodePath } from '@nextcloud/paths' -import { normalize } from 'path' -import { getCapabilities } from '@nextcloud/capabilities' - import axios, { isCancel } from '@nextcloud/axios' +import { getCapabilities } from '@nextcloud/capabilities' +import { davGetClient, davRemoteURL, davRootPath, FileType, Folder, Permission } from '@nextcloud/files' +import { encodePath } from '@nextcloud/paths' import PCancelable from 'p-cancelable' import PQueue from 'p-queue' - +import { normalize } from 'path' import { UploadCancelledError } from '../errors/UploadCancelledError.ts' -import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' +import { Upload, Status as UploadStatus } from '../upload.ts' import { getMaxChunksSize } from '../utils/config.ts' -import { Status as UploadStatus, Upload } from '../upload.ts' import { isFileSystemFileEntry } from '../utils/filesystem.ts' import { Directory } from '../utils/fileTree.ts' import { t } from '../utils/l10n.ts' import logger from '../utils/logger.ts' +import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' import { Eta } from './eta.ts' export enum UploaderStatus { IDLE = 0, UPLOADING = 1, - PAUSED = 2 + PAUSED = 2, } export 
class Uploader { - // Initialized via setter in the constructor private _destinationFolder!: Folder private _isPublic: boolean @@ -58,8 +55,8 @@ export class Uploader { /** * Initialize uploader * - * @param {boolean} isPublic are we in public mode ? - * @param {Folder} destinationFolder the context folder to operate, relative to the root folder + * @param isPublic are we in public mode ? + * @param destinationFolder the context folder to operate, relative to the root folder */ constructor( isPublic = false, @@ -135,6 +132,7 @@ export class Uploader { /** * Set a custom header + * * @param name The header to set * @param value The string value */ @@ -144,6 +142,7 @@ export class Uploader { /** * Unset a custom header + * * @param name The header to unset */ deleteCustomerHeader(name: string): void { @@ -215,9 +214,9 @@ export class Uploader { } private updateStats() { - const size = this._uploadQueue.map(upload => upload.size) + const size = this._uploadQueue.map((upload) => upload.size) .reduce((partialSum, a) => partialSum + a, 0) - const uploaded = this._uploadQueue.map(upload => upload.uploaded) + const uploaded = this._uploadQueue.map((upload) => upload.uploaded) .reduce((partialSum, a) => partialSum + a, 0) this._eta.update(uploaded, size) @@ -242,6 +241,7 @@ export class Uploader { /** * Notify listeners of the upload completion + * * @param upload The upload that finished */ private _notifyAll(upload: Upload): void { @@ -256,9 +256,10 @@ export class Uploader { /** * Uploads multiple files or folders while preserving the relative path (if available) - * @param {string} destination The destination path relative to the root folder. e.g. 
/foo/bar (a file "a.txt" will be uploaded then to "/foo/bar/a.txt") - * @param {Array} files The files and/or folders to upload - * @param {Function} callback Callback that receives the nodes in the current folder and the current path to allow resolving conflicts, all nodes that are returned will be uploaded (if a folder does not exist it will be created) + * + * @param destination The destination path relative to the root folder. e.g. /foo/bar (a file "a.txt" will be uploaded then to "/foo/bar/a.txt") + * @param files The files and/or folders to upload + * @param callback Callback that receives the nodes in the current folder and the current path to allow resolving conflicts, all nodes that are returned will be uploaded (if a folder does not exist it will be created) * @return Cancelable promise that resolves to an array of uploads * * @example @@ -287,11 +288,11 @@ export class Uploader { */ batchUpload( destination: string, - files: (File|FileSystemEntry)[], - callback?: (nodes: Array, currentPath: string) => Promise|false>, + files: (File | FileSystemEntry)[], + callback?: (nodes: Array, currentPath: string) => Promise | false>, ): PCancelable { if (!callback) { - callback = async (files: Array) => files + callback = async (files: Array) => files } return new PCancelable(async (resolve, reject, onCancel) => { @@ -338,6 +339,7 @@ export class Uploader { /** * Helper to create a directory wrapped inside an Upload class + * * @param destination Destination where to create the directory * @param directory The directory to create * @param client The cached WebDAV client @@ -393,7 +395,7 @@ export class Uploader { private uploadDirectory( destination: string, directory: Directory, - callback: (nodes: Array, currentPath: string) => Promise|false>, + callback: (nodes: Array, currentPath: string) => Promise | false>, // client as parameter to cache it for performance client: WebDAVClient, ): PCancelable { @@ -454,12 +456,13 @@ export class Uploader { /** * Upload a file 
to the given path - * @param {string} destination the destination path relative to the root folder. e.g. /foo/bar.txt - * @param {File|FileSystemFileEntry} fileHandle the file to upload - * @param {string} root the root folder to upload to + * + * @param destination the destination path relative to the root folder. e.g. /foo/bar.txt + * @param fileHandle the file to upload + * @param root the root folder to upload to * @param retries number of retries */ - upload(destination: string, fileHandle: File|FileSystemFileEntry, root?: string, retries: number = 5): PCancelable { + upload(destination: string, fileHandle: File | FileSystemFileEntry, root?: string, retries: number = 5): PCancelable { root = root || this.root const destinationPath = `${root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` @@ -704,5 +707,4 @@ export class Uploader { } return {} } - } diff --git a/lib/upload/utils/config.ts b/lib/upload/utils/config.ts index 587feeda..511460a6 100644 --- a/lib/upload/utils/config.ts +++ b/lib/upload/utils/config.ts @@ -2,7 +2,13 @@ * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ -export const getMaxChunksSize = function(fileSize: number | undefined = undefined): number { + +/** + * Get the maximum chunk size for chunked uploads based on the server configuration and file size. + * + * @param fileSize - The size of the file to be uploaded. If not provided, the function will return the default chunk size. 
+ */ +export function getMaxChunksSize(fileSize: number | undefined = undefined): number { const maxChunkSize = window.OC?.appConfig?.files?.max_chunk_size if (maxChunkSize <= 0) { return 0 diff --git a/lib/upload/utils/conflicts.ts b/lib/upload/utils/conflicts.ts index dc3d554d..a7d4a10e 100644 --- a/lib/upload/utils/conflicts.ts +++ b/lib/upload/utils/conflicts.ts @@ -7,23 +7,25 @@ import type { Node } from '@nextcloud/files' /** * Check if there is a conflict between two sets of files - * @param {Array} files the incoming files - * @param {Node[]} content all the existing files in the directory - * @return {boolean} true if there is a conflict + * + * @param files the incoming files + * @param content all the existing files in the directory + * @return true if there is a conflict */ -export function hasConflict(files: (File|FileSystemEntry|Node)[], content: Node[]): boolean { +export function hasConflict(files: (File | FileSystemEntry | Node)[], content: Node[]): boolean { return getConflicts(files, content).length > 0 } /** * Get the conflicts between two sets of files - * @param {Array} files the incoming files - * @param {Node[]} content all the existing files in the directory - * @return {boolean} true if there is a conflict + * + * @param files the incoming files + * @param content all the existing files in the directory + * @return true if there is a conflict */ -export function getConflicts(files: T[], content: Node[]): T[] { +export function getConflicts(files: T[], content: Node[]): T[] { const contentNames = content.map((node: Node) => node.basename) - const conflicts = files.filter((node: File|FileSystemEntry|Node) => { + const conflicts = files.filter((node: File | FileSystemEntry | Node) => { const name = 'basename' in node ? 
node.basename : node.name return contentNames.indexOf(name) !== -1 }) diff --git a/lib/upload/utils/fileTree.ts b/lib/upload/utils/fileTree.ts index e11aaeda..e283d820 100644 --- a/lib/upload/utils/fileTree.ts +++ b/lib/upload/utils/fileTree.ts @@ -14,10 +14,9 @@ import { isFileSystemDirectoryEntry, isFileSystemFileEntry } from './filesystem. * It allows to create virtual directories */ export class Directory extends File { - private _originalName: string private _path: string - private _children: Map + private _children: Map constructor(path: string) { super([], basename(path), { type: 'httpd/unix-directory', lastModified: 0 }) @@ -39,7 +38,7 @@ export class Directory extends File { return this._originalName } - get children(): Array { + get children(): Array { return Array.from(this._children.values()) } @@ -47,15 +46,16 @@ export class Directory extends File { return this._path } - getChild(name: string): File|Directory|null { + getChild(name: string): File | Directory | null { return this._children.get(name) ?? null } /** * Add multiple children at once + * * @param files The files to add */ - async addChildren(files: Array): Promise { + async addChildren(files: Array): Promise { for (const file of files) { await this.addChild(file) } @@ -64,9 +64,10 @@ export class Directory extends File { /** * Add a child to the directory. * If it is a nested child the parents will be created if not already exist. 
+ * * @param file The child to add */ - async addChild(file: File|FileSystemEntry) { + async addChild(file: File | FileSystemEntry) { const rootPath = this._path && `${this._path}/` if (isFileSystemFileEntry(file)) { file = await new Promise((resolve, reject) => (file as FileSystemFileEntry).file(resolve, reject)) @@ -118,7 +119,6 @@ export class Directory extends File { } } } - } /** diff --git a/lib/upload/utils/l10n.ts b/lib/upload/utils/l10n.ts index 49c9443b..2f6c6879 100644 --- a/lib/upload/utils/l10n.ts +++ b/lib/upload/utils/l10n.ts @@ -8,7 +8,7 @@ const gtBuilder = getGettextBuilder() .detectLocale() // @ts-expect-error __TRANSLATIONS__ is replaced by vite -__TRANSLATIONS__.map(data => gtBuilder.addTranslation(data.locale, data.json)) +__TRANSLATIONS__.map((data) => gtBuilder.addTranslation(data.locale, data.json)) interface Gettext { /** diff --git a/lib/upload/utils/upload.ts b/lib/upload/utils/upload.ts index 869ad295..ddf4a4ed 100644 --- a/lib/upload/utils/upload.ts +++ b/lib/upload/utils/upload.ts @@ -1,4 +1,4 @@ -/** +/*! 
* SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ @@ -7,6 +7,7 @@ import type { AxiosError, AxiosProgressEvent, AxiosResponse } from 'axios' import { getCurrentUser } from '@nextcloud/auth' import axios from '@nextcloud/axios' +import { generateRemoteUrl, getBaseUrl } from '@nextcloud/router' import { getSharingToken } from '@nextcloud/sharing/public' import axiosRetry, { exponentialDelay, isNetworkOrIdempotentRequestError } from 'axios-retry' import logger from './logger.ts' @@ -25,13 +26,14 @@ interface UploadDataOptions { /** The final destination file (for chunked uploads) */ destinationFile?: string /** Additional headers */ - headers?: Record + headers?: Record /** Number of retries */ - retries?: number, + retries?: number } /** * Upload some data to a given path + * * @param url the url to upload to * @param uploadData the data to upload * @param uploadOptions upload options @@ -99,11 +101,12 @@ export async function uploadData( /** * Get chunk of the file. * Doing this on the fly give us a big performance boost and proper garbage collection + * * @param file File to upload * @param start Offset to start upload * @param length Size of chunk to upload */ -export const getChunk = function(file: File, start: number, length: number): Promise { +export function getChunk(file: File, start: number, length: number): Promise { if (start === 0 && file.size <= length) { return Promise.resolve(new Blob([file], { type: file.type || 'application/octet-stream' })) } @@ -113,12 +116,13 @@ export const getChunk = function(file: File, start: number, length: number): Pro /** * Create a temporary upload workspace to upload the chunks to + * * @param destinationFile The file name after finishing the chunked upload * @param retries number of retries * @param isPublic whether this upload is in a public share or not * @param customHeaders Custom HTTP headers used when creating the workspace (e.g. 
X-NC-Nickname for file drops) */ -export const initChunkWorkspace = async function(destinationFile: string | undefined = undefined, retries: number = 5, isPublic: boolean = false, customHeaders: Record = {}): Promise { +export async function initChunkWorkspace(destinationFile: string | undefined = undefined, retries: number = 5, isPublic: boolean = false, customHeaders: Record = {}): Promise { let chunksWorkspace: string if (isPublic) { chunksWorkspace = `${getBaseUrl()}/public.php/dav/uploads/${getSharingToken()}` diff --git a/lib/upload/window.d.ts b/lib/upload/window.d.ts index bac3a431..1f500bb8 100644 --- a/lib/upload/window.d.ts +++ b/lib/upload/window.d.ts @@ -1,9 +1,9 @@ /// -/** +/*! * SPDX-FileCopyrightText: 2023 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ -import type { Uploader } from './lib/uploader' +import type { Uploader } from './uploader/index.ts' // This is for private use only declare global { From 08186f8c94ff7beb9704faf197e2c06d8a46d373 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 00:34:09 +0100 Subject: [PATCH 03/14] refactor(upload): drop duplicated logger Signed-off-by: Ferdinand Thiessen --- lib/upload/uploader/uploader.ts | 2 +- lib/upload/utils/logger.ts | 11 ----------- lib/upload/utils/upload.ts | 2 +- 3 files changed, 2 insertions(+), 13 deletions(-) delete mode 100644 lib/upload/utils/logger.ts diff --git a/lib/upload/uploader/uploader.ts b/lib/upload/uploader/uploader.ts index 16db5ed9..10bd04bb 100644 --- a/lib/upload/uploader/uploader.ts +++ b/lib/upload/uploader/uploader.ts @@ -14,13 +14,13 @@ import { encodePath } from '@nextcloud/paths' import PCancelable from 'p-cancelable' import PQueue from 'p-queue' import { normalize } from 'path' +import logger from '../../utils/logger.ts' import { UploadCancelledError } from '../errors/UploadCancelledError.ts' import { Upload, Status as UploadStatus } from '../upload.ts' import { getMaxChunksSize } from 
'../utils/config.ts' import { isFileSystemFileEntry } from '../utils/filesystem.ts' import { Directory } from '../utils/fileTree.ts' import { t } from '../utils/l10n.ts' -import logger from '../utils/logger.ts' import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' import { Eta } from './eta.ts' diff --git a/lib/upload/utils/logger.ts b/lib/upload/utils/logger.ts deleted file mode 100644 index 1c50c431..00000000 --- a/lib/upload/utils/logger.ts +++ /dev/null @@ -1,11 +0,0 @@ -/** - * SPDX-FileCopyrightText: 2019 Nextcloud GmbH and Nextcloud contributors - * SPDX-License-Identifier: AGPL-3.0-or-later - */ - -import { getLoggerBuilder } from '@nextcloud/logger' - -export default getLoggerBuilder() - .setApp('@nextcloud/upload') - .detectUser() - .build() diff --git a/lib/upload/utils/upload.ts b/lib/upload/utils/upload.ts index ddf4a4ed..83f28d16 100644 --- a/lib/upload/utils/upload.ts +++ b/lib/upload/utils/upload.ts @@ -10,7 +10,7 @@ import axios from '@nextcloud/axios' import { generateRemoteUrl, getBaseUrl } from '@nextcloud/router' import { getSharingToken } from '@nextcloud/sharing/public' import axiosRetry, { exponentialDelay, isNetworkOrIdempotentRequestError } from 'axios-retry' -import logger from './logger.ts' +import logger from '../../utils/logger.ts' axiosRetry(axios, { retries: 0 }) From 3a1a478b25b9a8f8026a173e362946cf34d6903b Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 00:36:26 +0100 Subject: [PATCH 04/14] refactor(upload): merge window declarations Signed-off-by: Ferdinand Thiessen --- lib/upload/window.d.ts | 21 --------------------- lib/window.d.ts | 8 +++++++- 2 files changed, 7 insertions(+), 22 deletions(-) delete mode 100644 lib/upload/window.d.ts diff --git a/lib/upload/window.d.ts b/lib/upload/window.d.ts deleted file mode 100644 index 1f500bb8..00000000 --- a/lib/upload/window.d.ts +++ /dev/null @@ -1,21 +0,0 @@ -/// -/*! 
- * SPDX-FileCopyrightText: 2023 Nextcloud GmbH and Nextcloud contributors - * SPDX-License-Identifier: AGPL-3.0-or-later - */ -import type { Uploader } from './uploader/index.ts' - -// This is for private use only -declare global { - interface Window { - _nc_uploader?: Uploader - - OC: Nextcloud.v28.OC & { - appConfig: { - files: { - max_chunk_size: number - } - } - } - } -} diff --git a/lib/window.d.ts b/lib/window.d.ts index 519145e3..6dd4ac3e 100644 --- a/lib/window.d.ts +++ b/lib/window.d.ts @@ -6,7 +6,13 @@ declare global { interface Window { - OC: Nextcloud.v32.OC + OC: Nextcloud.v32.OC & { + appConfig: { + files: { + max_chunk_size: number + } + } + } // eslint-disable-next-line @typescript-eslint/no-explicit-any OCA: any _nc_files_scope?: Record> From d6ac1abf6ec92914cf204fc73f2c73bb05315081 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 00:43:31 +0100 Subject: [PATCH 05/14] refactor(upload): use consistent case for filenames Signed-off-by: Ferdinand Thiessen --- lib/upload/getUploader.ts | 3 ++- lib/upload/index.ts | 2 +- lib/upload/uploader/{eta.spec.ts => Eta.spec.ts} | 3 ++- lib/upload/uploader/{eta.ts => Eta.ts} | 0 lib/upload/{upload.ts => uploader/Upload.ts} | 6 ++++-- lib/upload/uploader/{uploader.ts => Uploader.ts} | 7 ++++--- lib/upload/uploader/index.ts | 4 ++-- 7 files changed, 15 insertions(+), 10 deletions(-) rename lib/upload/uploader/{eta.spec.ts => Eta.spec.ts} (99%) rename lib/upload/uploader/{eta.ts => Eta.ts} (100%) rename lib/upload/{upload.ts => uploader/Upload.ts} (97%) rename lib/upload/uploader/{uploader.ts => Uploader.ts} (99%) diff --git a/lib/upload/getUploader.ts b/lib/upload/getUploader.ts index 19505a28..7f9723e8 100644 --- a/lib/upload/getUploader.ts +++ b/lib/upload/getUploader.ts @@ -4,7 +4,8 @@ */ import { isPublicShare } from '@nextcloud/sharing/public' -import { Uploader } from './uploader/uploader.ts' +import { scopedGlobals } from '../globalScope.ts' +import { Uploader } from 
'./uploader/Uploader.ts' /** * Get the global Uploader instance. diff --git a/lib/upload/index.ts b/lib/upload/index.ts index 365c0358..6af332a4 100644 --- a/lib/upload/index.ts +++ b/lib/upload/index.ts @@ -7,6 +7,6 @@ export type { Eta, EtaEventsMap } from './uploader/index.ts' export type { Directory, IDirectory } from './utils/fileTree.ts' export { getUploader, upload } from './getUploader.ts' -export { Upload, Status as UploadStatus } from './upload.ts' +export { Upload, Status as UploadStatus } from './uploader/Upload.ts' export { EtaStatus, Uploader, UploaderStatus } from './uploader/index.ts' export { getConflicts, hasConflict } from './utils/conflicts.ts' diff --git a/lib/upload/uploader/eta.spec.ts b/lib/upload/uploader/Eta.spec.ts similarity index 99% rename from lib/upload/uploader/eta.spec.ts rename to lib/upload/uploader/Eta.spec.ts index 39244bd3..57a46486 100644 --- a/lib/upload/uploader/eta.spec.ts +++ b/lib/upload/uploader/Eta.spec.ts @@ -2,8 +2,9 @@ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ + import { afterAll, beforeAll, describe, expect, it, test, vi } from 'vitest' -import { Eta, EtaStatus } from './eta.ts' +import { Eta, EtaStatus } from './Eta.ts' describe('ETA - status', () => { it('has default set', () => { diff --git a/lib/upload/uploader/eta.ts b/lib/upload/uploader/Eta.ts similarity index 100% rename from lib/upload/uploader/eta.ts rename to lib/upload/uploader/Eta.ts diff --git a/lib/upload/upload.ts b/lib/upload/uploader/Upload.ts similarity index 97% rename from lib/upload/upload.ts rename to lib/upload/uploader/Upload.ts index 47366e91..ed6ac548 100644 --- a/lib/upload/upload.ts +++ b/lib/upload/uploader/Upload.ts @@ -1,10 +1,11 @@ -/** +/*! 
* SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ + import type { AxiosResponse } from 'axios' -import { getMaxChunksSize } from './utils/config.js' +import { getMaxChunksSize } from '../utils/config.ts' export enum Status { INITIALIZED = 0, @@ -14,6 +15,7 @@ export enum Status { CANCELLED = 4, FAILED = 5, } + export class Upload { private _source: string private _file: File diff --git a/lib/upload/uploader/uploader.ts b/lib/upload/uploader/Uploader.ts similarity index 99% rename from lib/upload/uploader/uploader.ts rename to lib/upload/uploader/Uploader.ts index 10bd04bb..350ee22a 100644 --- a/lib/upload/uploader/uploader.ts +++ b/lib/upload/uploader/Uploader.ts @@ -1,7 +1,8 @@ -/** +/*! * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ + import type { AxiosError, AxiosResponse } from 'axios' import type { WebDAVClient } from 'webdav' import type { IDirectory } from '../utils/fileTree.ts' @@ -16,13 +17,13 @@ import PQueue from 'p-queue' import { normalize } from 'path' import logger from '../../utils/logger.ts' import { UploadCancelledError } from '../errors/UploadCancelledError.ts' -import { Upload, Status as UploadStatus } from '../upload.ts' import { getMaxChunksSize } from '../utils/config.ts' import { isFileSystemFileEntry } from '../utils/filesystem.ts' import { Directory } from '../utils/fileTree.ts' import { t } from '../utils/l10n.ts' import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' -import { Eta } from './eta.ts' +import { Eta } from './Eta.ts' +import { Upload, Status as UploadStatus } from './Upload.ts' export enum UploaderStatus { IDLE = 0, diff --git a/lib/upload/uploader/index.ts b/lib/upload/uploader/index.ts index ba0e2d34..13de3ad6 100644 --- a/lib/upload/uploader/index.ts +++ b/lib/upload/uploader/index.ts @@ -8,9 +8,9 @@ export { type EtaEventsMap, EtaStatus, -} from 
'./eta.ts' +} from './Eta.ts' export { Uploader, UploaderStatus, -} from './uploader.ts' +} from './Uploader.ts' From 5882bded4b1d6115555e919ece150b08e33af4f4 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 00:43:48 +0100 Subject: [PATCH 06/14] fix(upload): use scoped globals for the files uploader Signed-off-by: Ferdinand Thiessen --- lib/globalScope.ts | 3 +++ lib/upload/getUploader.ts | 9 +++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/globalScope.ts b/lib/globalScope.ts index 30cb40cb..27c97e69 100644 --- a/lib/globalScope.ts +++ b/lib/globalScope.ts @@ -13,6 +13,7 @@ import type { import type { IFileAction, IFileListAction } from './ui/actions/index.ts' import type { FilesRegistry } from './ui/registry.ts' import type { ISidebarAction, ISidebarTab } from './ui/sidebar/index.ts' +import type { Uploader } from './upload/index.ts' interface InternalGlobalScope { davNamespaces?: DavProperty @@ -22,6 +23,8 @@ interface InternalGlobalScope { navigation?: Navigation registry?: FilesRegistry + uploader?: Uploader + fileActions?: Map fileListActions?: Map fileListFilters?: Map diff --git a/lib/upload/getUploader.ts b/lib/upload/getUploader.ts index 7f9723e8..ed28bfd7 100644 --- a/lib/upload/getUploader.ts +++ b/lib/upload/getUploader.ts @@ -11,15 +11,16 @@ import { Uploader } from './uploader/Uploader.ts' * Get the global Uploader instance. * * Note: If you need a local uploader you can just create a new instance, - * this global instance will be shared with other apps. + * this global instance will be shared with other apps and is mostly useful + * for the Files app web UI to keep track of all uploads and their progress. 
* * @param isPublic Set to true to use public upload endpoint (by default it is auto detected) * @param forceRecreate Force a new uploader instance - main purpose is for testing */ export function getUploader(isPublic: boolean = isPublicShare(), forceRecreate = false): Uploader { - if (forceRecreate || window._nc_uploader === undefined) { - window._nc_uploader = new Uploader(isPublic) + if (forceRecreate || scopedGlobals.uploader === undefined) { + scopedGlobals.uploader = new Uploader(isPublic) } - return window._nc_uploader + return scopedGlobals.uploader } From 33b5612fe833e55b2b6dd7dc21865b4f7a785a07 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 00:48:57 +0100 Subject: [PATCH 07/14] fix(upload): use proper local imports for files API Signed-off-by: Ferdinand Thiessen --- lib/upload/uploader/Eta.ts | 2 +- lib/upload/uploader/Uploader.ts | 20 ++++++++++++-------- lib/upload/utils/conflicts.ts | 12 ++++++------ lib/upload/utils/fileTree.ts | 2 +- lib/upload/utils/l10n.ts | 2 +- 5 files changed, 21 insertions(+), 17 deletions(-) diff --git a/lib/upload/uploader/Eta.ts b/lib/upload/uploader/Eta.ts index c7fd4f83..bce9624a 100644 --- a/lib/upload/uploader/Eta.ts +++ b/lib/upload/uploader/Eta.ts @@ -3,8 +3,8 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ -import { formatFileSize } from '@nextcloud/files' import { TypedEventTarget } from 'typescript-event-target' +import { formatFileSize } from '../../utils/fileSize.ts' import { n, t } from '../utils/l10n.ts' export enum EtaStatus { diff --git a/lib/upload/uploader/Uploader.ts b/lib/upload/uploader/Uploader.ts index 350ee22a..07f8a081 100644 --- a/lib/upload/uploader/Uploader.ts +++ b/lib/upload/uploader/Uploader.ts @@ -5,16 +5,20 @@ import type { AxiosError, AxiosResponse } from 'axios' import type { WebDAVClient } from 'webdav' +import type { IFolder } from '../../node/folder.ts' import type { IDirectory } from '../utils/fileTree.ts' import { getCurrentUser } from '@nextcloud/auth' 
import axios, { isCancel } from '@nextcloud/axios' import { getCapabilities } from '@nextcloud/capabilities' -import { davGetClient, davRemoteURL, davRootPath, FileType, Folder, Permission } from '@nextcloud/files' import { encodePath } from '@nextcloud/paths' import PCancelable from 'p-cancelable' import PQueue from 'p-queue' import { normalize } from 'path' +import { defaultRemoteURL, defaultRootPath, getClient } from '../../dav/dav.ts' +import { FileType } from '../../node/fileType.ts' +import { Folder } from '../../node/folder.ts' +import { Permission } from '../../permissions.ts' import logger from '../../utils/logger.ts' import { UploadCancelledError } from '../errors/UploadCancelledError.ts' import { getMaxChunksSize } from '../utils/config.ts' @@ -33,7 +37,7 @@ export enum UploaderStatus { export class Uploader { // Initialized via setter in the constructor - private _destinationFolder!: Folder + private _destinationFolder!: IFolder private _isPublic: boolean private _customHeaders: Record @@ -61,13 +65,13 @@ export class Uploader { */ constructor( isPublic = false, - destinationFolder?: Folder, + destinationFolder?: IFolder, ) { this._isPublic = isPublic this._customHeaders = {} if (!destinationFolder) { - const source = `${davRemoteURL}${davRootPath}` + const source = `${defaultRemoteURL}${defaultRootPath}` let owner: string if (isPublic) { @@ -84,7 +88,7 @@ export class Uploader { id: 0, owner, permissions: Permission.ALL, - root: davRootPath, + root: defaultRootPath, source, }) } @@ -101,14 +105,14 @@ export class Uploader { /** * Get the upload destination path relative to the root folder */ - get destination(): Folder { + get destination(): IFolder { return this._destinationFolder } /** * Set the upload destination path relative to the root folder */ - set destination(folder: Folder) { + set destination(folder: IFolder) { if (!folder || folder.type !== FileType.Folder || !folder.source) { throw new Error('Invalid destination folder') } @@ -308,7 
+312,7 @@ export class Uploader { logger.debug('Starting new batch upload', { target }) try { // setup client with root and custom header - const client = davGetClient(this.root, this._customHeaders) + const client = getClient(this.root, this._customHeaders) // Create the promise for the virtual root directory const promise = this.uploadDirectory(destination, rootFolder, callback, client) // Make sure to cancel it when requested diff --git a/lib/upload/utils/conflicts.ts b/lib/upload/utils/conflicts.ts index a7d4a10e..2d4e5adb 100644 --- a/lib/upload/utils/conflicts.ts +++ b/lib/upload/utils/conflicts.ts @@ -1,9 +1,9 @@ -/** +/*! * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ -import type { Node } from '@nextcloud/files' +import type { INode } from '../../node/index.ts' /** * Check if there is a conflict between two sets of files @@ -12,7 +12,7 @@ import type { Node } from '@nextcloud/files' * @param content all the existing files in the directory * @return true if there is a conflict */ -export function hasConflict(files: (File | FileSystemEntry | Node)[], content: Node[]): boolean { +export function hasConflict(files: (File | FileSystemEntry | INode)[], content: INode[]): boolean { return getConflicts(files, content).length > 0 } @@ -23,9 +23,9 @@ export function hasConflict(files: (File | FileSystemEntry | Node)[], content: N * @param content all the existing files in the directory * @return true if there is a conflict */ -export function getConflicts(files: T[], content: Node[]): T[] { - const contentNames = content.map((node: Node) => node.basename) - const conflicts = files.filter((node: File | FileSystemEntry | Node) => { +export function getConflicts(files: T[], content: INode[]): T[] { + const contentNames = content.map((node: INode) => node.basename) + const conflicts = files.filter((node: File | FileSystemEntry | INode) => { const name = 'basename' in node ? 
node.basename : node.name return contentNames.indexOf(name) !== -1 }) diff --git a/lib/upload/utils/fileTree.ts b/lib/upload/utils/fileTree.ts index e283d820..b9cbbd65 100644 --- a/lib/upload/utils/fileTree.ts +++ b/lib/upload/utils/fileTree.ts @@ -1,4 +1,4 @@ -/** +/*! * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ diff --git a/lib/upload/utils/l10n.ts b/lib/upload/utils/l10n.ts index 2f6c6879..d067afb5 100644 --- a/lib/upload/utils/l10n.ts +++ b/lib/upload/utils/l10n.ts @@ -5,7 +5,7 @@ import { getGettextBuilder } from '@nextcloud/l10n/gettext' const gtBuilder = getGettextBuilder() - .detectLocale() + .detectLanguage() // @ts-expect-error __TRANSLATIONS__ is replaced by vite __TRANSLATIONS__.map((data) => gtBuilder.addTranslation(data.locale, data.json)) From 3baf3380ade7b9f2616f4fe4d33ff9e0eaa110ef Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 11:40:41 +0100 Subject: [PATCH 08/14] refactor(upload): adjust code for Typescript erasable syntax Signed-off-by: Ferdinand Thiessen --- lib/upload/index.ts | 2 +- lib/upload/uploader/Eta.ts | 16 +++++++++------- lib/upload/uploader/Upload.ts | 32 +++++++++++++++++--------------- lib/upload/uploader/Uploader.ts | 18 ++++++++++-------- 4 files changed, 37 insertions(+), 31 deletions(-) diff --git a/lib/upload/index.ts b/lib/upload/index.ts index 6af332a4..8672945e 100644 --- a/lib/upload/index.ts +++ b/lib/upload/index.ts @@ -7,6 +7,6 @@ export type { Eta, EtaEventsMap } from './uploader/index.ts' export type { Directory, IDirectory } from './utils/fileTree.ts' export { getUploader, upload } from './getUploader.ts' -export { Upload, Status as UploadStatus } from './uploader/Upload.ts' +export { Upload, UploadStatus } from './uploader/Upload.ts' export { EtaStatus, Uploader, UploaderStatus } from './uploader/index.ts' export { getConflicts, hasConflict } from './utils/conflicts.ts' diff --git a/lib/upload/uploader/Eta.ts 
b/lib/upload/uploader/Eta.ts index bce9624a..cb3bd42a 100644 --- a/lib/upload/uploader/Eta.ts +++ b/lib/upload/uploader/Eta.ts @@ -7,11 +7,13 @@ import { TypedEventTarget } from 'typescript-event-target' import { formatFileSize } from '../../utils/fileSize.ts' import { n, t } from '../utils/l10n.ts' -export enum EtaStatus { - Idle = 0, - Paused = 1, - Running = 2, -} +export const EtaStatus = Object.freeze({ + Idle: 0, + Paused: 1, + Running: 2, +}) + +type TEtaStatus = typeof EtaStatus[keyof typeof EtaStatus] interface EtaOptions { /** Low pass filter cutoff time for smoothing the speed */ @@ -37,7 +39,7 @@ export class Eta extends TypedEventTarget { /** Current progress (cached) as interval [0,1] */ private _progress: number = 0 /** Status of the ETA */ - private _status: EtaStatus = EtaStatus.Idle + private _status: TEtaStatus = EtaStatus.Idle /** Time of the last update */ private _startTime: number = -1 /** Total elapsed time for current ETA */ @@ -168,7 +170,7 @@ export class Eta extends TypedEventTarget { /** * Status of the Eta (paused, active, idle). 
*/ - public get status(): EtaStatus { + public get status(): TEtaStatus { return this._status } diff --git a/lib/upload/uploader/Upload.ts b/lib/upload/uploader/Upload.ts index ed6ac548..fa0608d3 100644 --- a/lib/upload/uploader/Upload.ts +++ b/lib/upload/uploader/Upload.ts @@ -7,14 +7,16 @@ import type { AxiosResponse } from 'axios' import { getMaxChunksSize } from '../utils/config.ts' -export enum Status { - INITIALIZED = 0, - UPLOADING = 1, - ASSEMBLING = 2, - FINISHED = 3, - CANCELLED = 4, - FAILED = 5, -} +export const UploadStatus = Object.freeze({ + INITIALIZED: 0, + UPLOADING: 1, + ASSEMBLING: 2, + FINISHED: 3, + CANCELLED: 4, + FAILED: 5, +}) + +type TUploadStatus = typeof UploadStatus[keyof typeof UploadStatus] export class Upload { private _source: string @@ -26,7 +28,7 @@ export class Upload { private _uploaded = 0 private _startTime = 0 - private _status: Status = Status.INITIALIZED + private _status: TUploadStatus = UploadStatus.INITIALIZED private _controller: AbortController private _response: AxiosResponse | null = null @@ -82,13 +84,13 @@ export class Upload { set uploaded(length: number) { if (length >= this._size) { this._status = this._isChunked - ? Status.ASSEMBLING - : Status.FINISHED + ? 
UploadStatus.ASSEMBLING + : UploadStatus.FINISHED this._uploaded = this._size return } - this._status = Status.UPLOADING + this._status = UploadStatus.UPLOADING this._uploaded = length // If first progress, let's log the start time @@ -97,14 +99,14 @@ export class Upload { } } - get status(): number { + get status(): TUploadStatus { return this._status } /** * Update this upload status */ - set status(status: Status) { + set status(status: TUploadStatus) { this._status = status } @@ -120,6 +122,6 @@ export class Upload { */ cancel() { this._controller.abort() - this._status = Status.CANCELLED + this._status = UploadStatus.CANCELLED } } diff --git a/lib/upload/uploader/Uploader.ts b/lib/upload/uploader/Uploader.ts index 07f8a081..748f6575 100644 --- a/lib/upload/uploader/Uploader.ts +++ b/lib/upload/uploader/Uploader.ts @@ -27,13 +27,15 @@ import { Directory } from '../utils/fileTree.ts' import { t } from '../utils/l10n.ts' import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' import { Eta } from './Eta.ts' -import { Upload, Status as UploadStatus } from './Upload.ts' +import { Upload, UploadStatus } from './Upload.ts' -export enum UploaderStatus { - IDLE = 0, - UPLOADING = 1, - PAUSED = 2, -} +export const UploaderStatus = Object.freeze({ + IDLE: 0, + UPLOADING: 1, + PAUSED: 2, +}) + +type TUploaderStatus = typeof UploaderStatus[keyof typeof UploaderStatus] export class Uploader { // Initialized via setter in the constructor @@ -51,7 +53,7 @@ export class Uploader { private _queueSize = 0 private _queueProgress = 0 - private _queueStatus: UploaderStatus = UploaderStatus.IDLE + private _queueStatus: TUploaderStatus = UploaderStatus.IDLE private _eta = new Eta() @@ -230,7 +232,7 @@ export class Uploader { // If already paused keep it that way if (this._queueStatus !== UploaderStatus.PAUSED) { - const pending = this._uploadQueue.find(({ status }) => [UploadStatus.INITIALIZED, UploadStatus.UPLOADING, UploadStatus.ASSEMBLING].includes(status)) + 
const pending = this._uploadQueue.find(({ status }) => ([UploadStatus.INITIALIZED, UploadStatus.UPLOADING, UploadStatus.ASSEMBLING] as number[]).includes(status)) if (this._jobQueue.size > 0 || pending) { this._queueStatus = UploaderStatus.UPLOADING } else { From 1c77595d4c637ef76a743b8d9364243b64928284 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Wed, 18 Feb 2026 13:03:12 +0100 Subject: [PATCH 09/14] refactor(upload): migrate from `p-cancelable` to native AbortController Signed-off-by: Ferdinand Thiessen --- lib/upload/uploader/Uploader.ts | 704 ++++++++++++++++---------------- 1 file changed, 357 insertions(+), 347 deletions(-) diff --git a/lib/upload/uploader/Uploader.ts b/lib/upload/uploader/Uploader.ts index 748f6575..14efbfd0 100644 --- a/lib/upload/uploader/Uploader.ts +++ b/lib/upload/uploader/Uploader.ts @@ -12,12 +12,10 @@ import { getCurrentUser } from '@nextcloud/auth' import axios, { isCancel } from '@nextcloud/axios' import { getCapabilities } from '@nextcloud/capabilities' import { encodePath } from '@nextcloud/paths' -import PCancelable from 'p-cancelable' import PQueue from 'p-queue' import { normalize } from 'path' import { defaultRemoteURL, defaultRootPath, getClient } from '../../dav/dav.ts' -import { FileType } from '../../node/fileType.ts' -import { Folder } from '../../node/folder.ts' +import { FileType, Folder } from '../../node/index.ts' import { Permission } from '../../permissions.ts' import logger from '../../utils/logger.ts' import { UploadCancelledError } from '../errors/UploadCancelledError.ts' @@ -37,6 +35,37 @@ export const UploaderStatus = Object.freeze({ type TUploaderStatus = typeof UploaderStatus[keyof typeof UploaderStatus] +interface BaseOptions { + /** + * Abort signal to cancel the upload + */ + signal?: AbortSignal +} + +interface UploadOptions extends BaseOptions { + /** + * The root folder where to upload + */ + root?: string + + /** + * Number of retries for the upload + * + * @default 5 + */ + retries?: 
number +} + +interface DirectoryUploadOptions extends BaseOptions { + destination: string + directory: Directory + client: WebDAVClient +} + +interface BatchUploadOptions extends BaseOptions { + callback?: (nodes: Array, currentPath: string) => Promise | false> +} + export class Uploader { // Initialized via setter in the constructor private _destinationFolder!: IFolder @@ -266,14 +295,15 @@ export class Uploader { * * @param destination The destination path relative to the root folder. e.g. /foo/bar (a file "a.txt" will be uploaded then to "/foo/bar/a.txt") * @param files The files and/or folders to upload - * @param callback Callback that receives the nodes in the current folder and the current path to allow resolving conflicts, all nodes that are returned will be uploaded (if a folder does not exist it will be created) - * @return Cancelable promise that resolves to an array of uploads + * @param options - optional parameters + * @param options.callback Callback that receives the nodes in the current folder and the current path to allow resolving conflicts, all nodes that are returned will be uploaded (if a folder does not exist it will be created) + * @throws {UploadCancelledError} - If the upload was canceled by the user via the abort signal * * @example * ```ts * // For example this is from handling the onchange event of an input[type=file] * async handleFiles(files: File[]) { - * this.uploads = await this.uploader.batchUpload('uploads', files, this.handleConflicts) + * this.uploads = await this.uploader.batchUpload('uploads', files, { callback: this.handleConflicts }) * } * * async handleConflicts(nodes: File[], currentPath: string) { @@ -293,184 +323,166 @@ export class Uploader { * } * ``` */ - batchUpload( + async batchUpload( destination: string, files: (File | FileSystemEntry)[], - callback?: (nodes: Array, currentPath: string) => Promise | false>, - ): PCancelable { - if (!callback) { - callback = async (files: Array) => files - } - - return new 
PCancelable(async (resolve, reject, onCancel) => { - const rootFolder = new Directory('') - await rootFolder.addChildren(files) - // create a meta upload to ensure all ongoing child requests are listed - const target = `${this.root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` - const upload = new Upload(target, false, 0, rootFolder) - upload.status = UploadStatus.UPLOADING - this._uploadQueue.push(upload) - - logger.debug('Starting new batch upload', { target }) - try { - // setup client with root and custom header - const client = getClient(this.root, this._customHeaders) - // Create the promise for the virtual root directory - const promise = this.uploadDirectory(destination, rootFolder, callback, client) - // Make sure to cancel it when requested - onCancel(() => promise.cancel()) - // await the uploads and resolve with "finished" status - const uploads = await promise - upload.status = UploadStatus.FINISHED - resolve(uploads) - } catch (error) { - if (isCancel(error) || error instanceof UploadCancelledError) { - logger.info('Upload cancelled by user', { error }) - upload.status = UploadStatus.CANCELLED - reject(new UploadCancelledError(error)) - } else { - logger.error('Error in batch upload', { error }) - upload.status = UploadStatus.FAILED - reject(error) - } - } finally { - // Upload queue is cleared when all the uploading jobs are done - // Meta upload unlike real uploading does not create a job - // Removing it manually here to make sure it is remove even when no uploading happened and there was nothing to finish - this._uploadQueue.splice(this._uploadQueue.indexOf(upload), 1) - this._notifyAll(upload) - this.updateStats() + options?: BatchUploadOptions, + ): Promise { + const rootFolder = new Directory('') + await rootFolder.addChildren(files) + // create a meta upload to ensure all ongoing child requests are listed + const target = `${this.root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` + const upload = new Upload(target, false, 0, 
rootFolder) + upload.status = UploadStatus.UPLOADING + this._uploadQueue.push(upload) + + logger.debug('Starting new batch upload', { target }) + try { + // setup client with root and custom header + const client = getClient(this.root, this._customHeaders) + // Create the promise for the virtual root directory + const promise = this.uploadDirectory({ + ...options, + destination, + directory: rootFolder, + client, + }) + // await the uploads and resolve with "finished" status + const uploads = await promise + upload.status = UploadStatus.FINISHED + return uploads + } catch (error) { + if (isCancel(error) || error instanceof UploadCancelledError || (error instanceof DOMException && error.name === 'AbortError')) { + logger.info('Upload cancelled by user', { error }) + upload.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } else { + logger.error('Error in batch upload', { error }) + upload.status = UploadStatus.FAILED + throw error } - }) + } finally { + // Upload queue is cleared when all the uploading jobs are done + // Meta upload unlike real uploading does not create a job + // Removing it manually here to make sure it is remove even when no uploading happened and there was nothing to finish + this._uploadQueue.splice(this._uploadQueue.indexOf(upload), 1) + this._notifyAll(upload) + this.updateStats() + } } /** * Helper to create a directory wrapped inside an Upload class * - * @param destination Destination where to create the directory - * @param directory The directory to create - * @param client The cached WebDAV client + * @param options - the options for the directory upload + * @param options.destination Destination where to create the directory + * @param options.directory The directory to create + * @param options.client The cached WebDAV client */ - private createDirectory(destination: string, directory: Directory, client: WebDAVClient): PCancelable { - const folderPath = 
normalize(`${destination}/${directory.name}`).replace(/\/$/, '') - const rootPath = `${this.root.replace(/\/$/, '')}/${folderPath.replace(/^\//, '')}` - - if (!directory.name) { + private async createDirectory(options: DirectoryUploadOptions): Promise { + if (!options.directory.name) { throw new Error('Can not create empty directory') } + const folderPath = normalize(`${options.destination}/${options.directory.name}`).replace(/\/$/, '') + const rootPath = `${this.root.replace(/\/$/, '')}/${folderPath.replace(/^\//, '')}` + // Add a new upload to the upload queue - const currentUpload: Upload = new Upload(rootPath, false, 0, directory) + const currentUpload: Upload = new Upload(rootPath, false, 0, options.directory) + if (options.signal) { + options.signal.addEventListener('abort', currentUpload.cancel) + } this._uploadQueue.push(currentUpload) - // Return the cancelable promise - return new PCancelable(async (resolve, reject, onCancel) => { - const abort = new AbortController() - onCancel(() => abort.abort()) - currentUpload.signal.addEventListener('abort', () => reject(t('Upload has been cancelled'))) - + try { // Add the request to the job queue -> wait for finish to resolve the promise - await this._jobQueue.add(async () => { + return await this._jobQueue.add(async () => { currentUpload.status = UploadStatus.UPLOADING - try { - await client.createDirectory(folderPath, { signal: abort.signal }) - resolve(currentUpload) - } catch (error) { - if (isCancel(error) || error instanceof UploadCancelledError) { - currentUpload.status = UploadStatus.CANCELLED - reject(new UploadCancelledError(error)) - } else if (error && typeof error === 'object' && 'status' in error && error.status === 405) { - // Directory already exists, so just write into it and ignore the error - logger.debug('Directory already exists, writing into it', { directory: directory.name }) - currentUpload.status = UploadStatus.FINISHED - resolve(currentUpload) - } else { - // Another error happened, so 
abort uploading the directory - currentUpload.status = UploadStatus.FAILED - reject(error) - } - } finally { - // Update statistics - this._notifyAll(currentUpload) - this.updateStats() - } + await options.client.createDirectory(folderPath, { signal: currentUpload.signal }) + return currentUpload }) - }) + } catch (error) { + if (isCancel(error) || error instanceof UploadCancelledError || (error instanceof DOMException && error.name === 'AbortError')) { + currentUpload.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } else if (error && typeof error === 'object' && 'status' in error && error.status === 405) { + // Directory already exists, so just write into it and ignore the error + logger.debug('Directory already exists, writing into it', { directory: options.directory.name }) + currentUpload.status = UploadStatus.FINISHED + return currentUpload + } else { + // Another error happened, so abort uploading the directory + currentUpload.status = UploadStatus.FAILED + throw error + } + } finally { + // Update statistics + this._notifyAll(currentUpload) + this.updateStats() + } } // Helper for uploading directories (recursively) - private uploadDirectory( - destination: string, - directory: Directory, - callback: (nodes: Array, currentPath: string) => Promise | false>, - // client as parameter to cache it for performance - client: WebDAVClient, - ): PCancelable { - const folderPath = normalize(`${destination}/${directory.name}`).replace(/\/$/, '') - - return new PCancelable(async (resolve, reject, onCancel) => { - const abort = new AbortController() - onCancel(() => abort.abort()) - - // Let the user handle conflicts - const selectedForUpload = await callback(directory.children, folderPath) - if (selectedForUpload === false) { - logger.debug('Upload canceled by user', { directory }) - reject(new UploadCancelledError('Conflict resolution cancelled by user')) - return - } else if (selectedForUpload.length === 0 && directory.children.length > 0) { 
- logger.debug('Skipping directory, as all files were skipped by user', { directory }) - resolve([]) - return - } + private async uploadDirectory(options: BatchUploadOptions & DirectoryUploadOptions): Promise { + // we use an internal abort controller to also cancel uploads if an error happened. + // So if a signal is provided we connect it to our controller. + const internalAbortController = new AbortController() + if (options.signal) { + options.signal.addEventListener('abort', () => internalAbortController.abort()) + } - const directories: PCancelable[] = [] - const uploads: PCancelable[] = [] - // Setup abort controller to cancel all child requests - abort.signal.addEventListener('abort', () => { - directories.forEach((upload) => upload.cancel()) - uploads.forEach((upload) => upload.cancel()) - }) + const internalOptions = { ...options, signal: internalAbortController.signal } + const folderPath = normalize(`${internalOptions.destination}/${internalOptions.directory.name}`).replace(/\/$/, '') + + // Let the user handle conflicts + const selectedForUpload = await (internalOptions.callback?.(internalOptions.directory.children, folderPath) ?? 
internalOptions.directory.children) + if (selectedForUpload === false) { + logger.debug('Upload canceled by user', { directory: internalOptions.directory }) + throw new UploadCancelledError('Conflict resolution cancelled by user') + } else if (selectedForUpload.length === 0 && internalOptions.directory.children.length > 0) { + logger.debug('Skipping directory, as all files were skipped by user', { directory: internalOptions.directory }) + return [] + } - logger.debug('Start directory upload', { directory }) - try { - if (directory.name) { - // If not the virtual root we need to create the directory first before uploading - // Make sure the promise is listed in the final result - uploads.push(this.createDirectory(destination, directory, client) as PCancelable) - // Ensure the directory is created before uploading / creating children - await uploads.at(-1) - } + logger.debug('Start directory upload', { directory: internalOptions.directory }) + const directories: Promise[] = [] + const uploads: Promise[] = [] + try { + if (internalOptions.directory.name) { + // If not the virtual root we need to create the directory first before uploading + // Make sure the promise is listed in the final result + uploads.push(this.createDirectory(internalOptions)) + // Ensure the directory is created before uploading / creating children + await uploads.at(-1) + } - for (const node of selectedForUpload) { - if (node instanceof Directory) { - directories.push(this.uploadDirectory(folderPath, node, callback, client)) - } else { - uploads.push(this.upload(`${folderPath}/${node.name}`, node)) - } + for (const node of selectedForUpload) { + if (node instanceof Directory) { + directories.push(this.uploadDirectory({ ...internalOptions, directory: node })) + } else { + uploads.push(this.upload(`${folderPath}/${node.name}`, node, { signal: internalOptions.signal })) } - - const resolvedUploads = await Promise.all(uploads) - const resolvedDirectoryUploads = await Promise.all(directories) - 
resolve([resolvedUploads, ...resolvedDirectoryUploads].flat()) - } catch (e) { - // Ensure a failure cancels all other requests - abort.abort(e) - reject(e) } - }) + + const resolvedUploads = await Promise.all(uploads) + const resolvedDirectoryUploads = await Promise.all(directories) + return [resolvedUploads, ...resolvedDirectoryUploads].flat() + } catch (e) { + // Ensure a failure cancels all other requests + internalAbortController.abort() + throw e + } } /** * Upload a file to the given path * - * @param destination the destination path relative to the root folder. e.g. /foo/bar.txt - * @param fileHandle the file to upload - * @param root the root folder to upload to - * @param retries number of retries + * @param destination - The destination path relative to the root folder. e.g. /foo/bar.txt + * @param fileHandle - The file to upload + * @param options - Optional parameters */ - upload(destination: string, fileHandle: File | FileSystemFileEntry, root?: string, retries: number = 5): PCancelable { - root = root || this.root + async upload(destination: string, fileHandle: File | FileSystemFileEntry, options?: UploadOptions): Promise { + const root = options?.root ?? 
this.root const destinationPath = `${root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` // Get the encoded source url to this object for requests purposes @@ -480,224 +492,222 @@ export class Uploader { this.eta.resume() logger.debug(`Uploading ${fileHandle.name} to ${encodedDestinationFile}`) - const promise = new PCancelable(async (resolve, reject, onCancel): Promise => { - // Handle file system entries by retrieving the file handle - if (isFileSystemFileEntry(fileHandle)) { - fileHandle = await new Promise((resolve) => (fileHandle as FileSystemFileEntry).file(resolve, reject)) - } - // We can cast here as we handled system entries in the if above - const file = fileHandle as File - - // @ts-expect-error TS2339 Object has no defined properties - const supportsPublicChunking = getCapabilities().dav?.public_shares_chunking ?? false - const maxChunkSize = getMaxChunksSize('size' in file ? file.size : undefined) - // If manually disabled or if the file is too small - const disabledChunkUpload = (this._isPublic && !supportsPublicChunking) - || maxChunkSize === 0 - || ('size' in file && file.size < maxChunkSize) - - const upload = new Upload(destinationPath, !disabledChunkUpload, file.size, file) - this._uploadQueue.push(upload) - this.updateStats() - - // Register cancellation caller - onCancel(upload.cancel) - - if (!disabledChunkUpload) { - logger.debug('Initializing chunked upload', { file, upload }) - - // Let's initialize a chunk upload - const tempUrl = await initChunkWorkspace(encodedDestinationFile, retries, this._isPublic, this._customHeaders) - const chunksQueue: Array> = [] - - // Generate chunks array - for (let chunk = 0; chunk < upload.chunks; chunk++) { - const bufferStart = chunk * maxChunkSize - // Don't go further than the file size - const bufferEnd = Math.min(bufferStart + maxChunkSize, upload.size) - // Make it a Promise function for better memory management - const blob = () => getChunk(file, bufferStart, maxChunkSize) - - // Init 
request queue - const request = () => { - // bytes uploaded on this chunk (as upload.uploaded tracks all chunks) - let chunkBytes = 0 - return uploadData( - `${tempUrl}/${chunk + 1}`, - blob, - { - signal: upload.signal, - destinationFile: encodedDestinationFile, - retries, - onUploadProgress: ({ bytes }) => { - // Only count 90% of bytes as the request is not yet processed by server - // we set the remaining 10% when the request finished (server responded). - const progressBytes = bytes * 0.9 - chunkBytes += progressBytes - upload.uploaded += progressBytes - this.updateStats() - }, - onUploadRetry: () => { - // Current try failed, so reset the stats for this chunk - // meaning remove the uploaded chunk bytes from stats - upload.uploaded -= chunkBytes - chunkBytes = 0 - this.updateStats() - }, - headers: { - ...this._customHeaders, - ...this._mtimeHeader(file), - 'OC-Total-Length': file.size, - 'Content-Type': 'application/octet-stream', - }, - }, - ) - // Update upload progress on chunk completion - .then(() => { - // request fully done so we uploaded the full chunk - // we first remove the intermediate chunkBytes from progress events - // and then add the real full size - upload.uploaded += bufferEnd - bufferStart - chunkBytes - this.updateStats() - }) - .catch((error) => { - if (error?.response?.status === 507) { - logger.error('Upload failed, not enough space on the server or quota exceeded. 
Cancelling the remaining chunks', { error, upload }) - upload.cancel() - upload.status = UploadStatus.FAILED - throw error - } - - if (!isCancel(error)) { - logger.error(`Chunk ${chunk + 1} ${bufferStart} - ${bufferEnd} uploading failed`, { error, upload }) - upload.cancel() - upload.status = UploadStatus.FAILED - } - throw error - }) - } - chunksQueue.push(this._jobQueue.add(request)) - } + // Handle file system entries by retrieving the file handle + if (isFileSystemFileEntry(fileHandle)) { + fileHandle = await new Promise((resolve, reject) => (fileHandle as FileSystemFileEntry).file(resolve, reject)) + } + // We can cast here as we handled system entries in the if above + const file = fileHandle as File - const request = async () => { - try { - // Once all chunks are sent, assemble the final file - await Promise.all(chunksQueue) + // @ts-expect-error TS2339 Object has no defined properties + const supportsPublicChunking = getCapabilities().dav?.public_shares_chunking ?? false + const maxChunkSize = getMaxChunksSize('size' in file ? file.size : undefined) + // If manually disabled or if the file is too small + const disabledChunkUpload = (this._isPublic && !supportsPublicChunking) + || maxChunkSize === 0 + || ('size' in file && file.size < maxChunkSize) + + const upload = new Upload(destinationPath, !disabledChunkUpload, file.size, file) + this._uploadQueue.push(upload) + this.updateStats() - // Assemble the chunks - upload.status = UploadStatus.ASSEMBLING - this.updateStats() + // Register cancellation caller + if (options?.signal) { + options.signal.addEventListener('abort', upload.cancel) + } - // Send the assemble request - upload.response = await axios.request({ - method: 'MOVE', - url: `${tempUrl}/.file`, + const retries = options?.retries ?? 
5 + if (!disabledChunkUpload) { + logger.debug('Initializing chunked upload', { file, upload }) + + // Let's initialize a chunk upload + const tempUrl = await initChunkWorkspace(encodedDestinationFile, retries, this._isPublic, this._customHeaders) + const chunksQueue: Array> = [] + + // Generate chunks array + for (let chunk = 0; chunk < upload.chunks; chunk++) { + const bufferStart = chunk * maxChunkSize + // Don't go further than the file size + const bufferEnd = Math.min(bufferStart + maxChunkSize, upload.size) + // Make it a Promise function for better memory management + const blob = () => getChunk(file, bufferStart, maxChunkSize) + + // Init request queue + const request = () => { + // bytes uploaded on this chunk (as upload.uploaded tracks all chunks) + let chunkBytes = 0 + return uploadData( + `${tempUrl}/${chunk + 1}`, + blob, + { + signal: upload.signal, + destinationFile: encodedDestinationFile, + retries, + onUploadProgress: ({ bytes }) => { + // Only count 90% of bytes as the request is not yet processed by server + // we set the remaining 10% when the request finished (server responded). 
+ const progressBytes = bytes * 0.9 + chunkBytes += progressBytes + upload.uploaded += progressBytes + this.updateStats() + }, + onUploadRetry: () => { + // Current try failed, so reset the stats for this chunk + // meaning remove the uploaded chunk bytes from stats + upload.uploaded -= chunkBytes + chunkBytes = 0 + this.updateStats() + }, headers: { ...this._customHeaders, ...this._mtimeHeader(file), 'OC-Total-Length': file.size, - Destination: encodedDestinationFile, + 'Content-Type': 'application/octet-stream', }, + }, + ) + // Update upload progress on chunk completion + .then(() => { + // request fully done so we uploaded the full chunk + // we first remove the intermediate chunkBytes from progress events + // and then add the real full size + upload.uploaded += bufferEnd - bufferStart - chunkBytes + this.updateStats() }) - upload.status = UploadStatus.FINISHED - this.updateStats() - - logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) - resolve(upload) - } catch (error) { - if (isCancel(error) || error instanceof UploadCancelledError) { - upload.status = UploadStatus.CANCELLED - reject(new UploadCancelledError(error)) - } else { - upload.status = UploadStatus.FAILED - reject(t('Failed to assemble the chunks together')) - } - // Cleaning up temp directory - axios.request({ - method: 'DELETE', - url: `${tempUrl}`, + .catch((error) => { + if (error?.response?.status === 507) { + logger.error('Upload failed, not enough space on the server or quota exceeded. 
Cancelling the remaining chunks', { error, upload }) + upload.cancel() + upload.status = UploadStatus.FAILED + throw error + } + + if (!isCancel(error)) { + logger.error(`Chunk ${chunk + 1} ${bufferStart} - ${bufferEnd} uploading failed`, { error, upload }) + upload.cancel() + upload.status = UploadStatus.FAILED + } + throw error }) - } finally { - // Notify listeners of the upload completion - this._notifyAll(upload) + } + chunksQueue.push(this._jobQueue.add(request)) + } + + const request = async () => { + try { + // Once all chunks are sent, assemble the final file + await Promise.all(chunksQueue) + + // Assemble the chunks + upload.status = UploadStatus.ASSEMBLING + this.updateStats() + + // Send the assemble request + upload.response = await axios.request({ + method: 'MOVE', + url: `${tempUrl}/.file`, + headers: { + ...this._customHeaders, + ...this._mtimeHeader(file), + 'OC-Total-Length': file.size, + Destination: encodedDestinationFile, + }, + }) + upload.status = UploadStatus.FINISHED + this.updateStats() + + logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) + return upload + } catch (error) { + // Cleaning up temp directory + axios.request({ + method: 'DELETE', + url: `${tempUrl}`, + }) + + if (isCancel(error) || error instanceof UploadCancelledError) { + upload.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } else { + upload.status = UploadStatus.FAILED + throw new Error(t('Failed to assemble the chunks together')) } + } finally { + // Notify listeners of the upload completion + this._notifyAll(upload) } + } - this._jobQueue.add(request) - } else { - logger.debug('Initializing regular upload', { file, upload }) - - // Generating upload limit - const blob = await getChunk(file, 0, upload.size) - const request = async () => { - try { - upload.response = await uploadData( - encodedDestinationFile, - blob, - { - signal: upload.signal, - onUploadProgress: ({ bytes }) => { - // As this is only the sent bytes not the 
processed ones we only count 90%. - // When the upload is finished (server acknowledged the upload) the remaining 10% will be correctly set. - upload.uploaded += bytes * 0.9 - this.updateStats() - }, - onUploadRetry: () => { - upload.uploaded = 0 - this.updateStats() - }, - headers: { - ...this._customHeaders, - ...this._mtimeHeader(file), - 'Content-Type': file.type, - }, + this._jobQueue.add(request) + } else { + logger.debug('Initializing regular upload', { file, upload }) + + // Generating upload limit + const blob = await getChunk(file, 0, upload.size) + const request = async () => { + try { + upload.response = await uploadData( + encodedDestinationFile, + blob, + { + signal: upload.signal, + onUploadProgress: ({ bytes }) => { + // As this is only the sent bytes not the processed ones we only count 90%. + // When the upload is finished (server acknowledged the upload) the remaining 10% will be correctly set. + upload.uploaded += bytes * 0.9 + this.updateStats() }, - ) - - // Update progress - now we set the uploaded size to 100% of the file size - upload.uploaded = upload.size - this.updateStats() - - // Resolve - logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) - resolve(upload) - } catch (error) { - if (isCancel(error) || error instanceof UploadCancelledError) { - upload.status = UploadStatus.CANCELLED - reject(new UploadCancelledError(error)) - return - } - - // Attach response to the upload object - if ((error as AxiosError)?.response) { - upload.response = (error as AxiosError).response as AxiosResponse - } + onUploadRetry: () => { + upload.uploaded = 0 + this.updateStats() + }, + headers: { + ...this._customHeaders, + ...this._mtimeHeader(file), + 'Content-Type': file.type, + }, + }, + ) - upload.status = UploadStatus.FAILED - logger.error(`Failed uploading ${file.name}`, { error, file, upload }) - reject(t('Failed to upload the file')) + // Update progress - now we set the uploaded size to 100% of the file size + upload.uploaded = 
upload.size + this.updateStats() + + // Resolve + logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) + return upload + } catch (error) { + if (isCancel(error) || error instanceof UploadCancelledError) { + upload.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } + + // Attach response to the upload object + if ((error as AxiosError)?.response) { + upload.response = (error as AxiosError).response as AxiosResponse } + upload.status = UploadStatus.FAILED + logger.error(`Failed uploading ${file.name}`, { error, file, upload }) + throw new Error(t('Failed to upload the file')) + } finally { // Notify listeners of the upload completion this._notifyAll(upload) } - this._jobQueue.add(request) - this.updateStats() } + this._jobQueue.add(request) + } - // Reset when upload queue is done - // Only when we know we're closing on the last chunks - // and/or assembling we can reset the uploader. - // Otherwise he queue might be idle for a short time - // and clear the Upload queue before we're done. - this._jobQueue.onIdle() - .then(() => this.reset()) - - // Finally return the Upload - return upload - }) as PCancelable + // Reset when upload queue is done + // Only when we know we're closing on the last chunks + // and/or assembling we can reset the uploader. + // Otherwise he queue might be idle for a short time + // and clear the Upload queue before we're done. + this._jobQueue.onIdle() + .then(() => this.reset()) - return promise + // Finally return the Upload + return upload } /** From 3b3c63d169c4140e04f41012f6548fc0f60b5b5a Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Thu, 19 Feb 2026 01:32:14 +0100 Subject: [PATCH 10/14] refactor(upload): split upload logic into separate classes for files and folders This also refactors the uploader to support custom events allowing apps to register event listeners for reactivity. 
Signed-off-by: Ferdinand Thiessen --- lib/upload/errors/UploadCancelledError.ts | 5 +- lib/upload/errors/UploadFailedError.spec.ts | 44 ++ lib/upload/errors/UploadFailedError.ts | 21 + lib/upload/index.ts | 11 +- lib/upload/uploader/Upload.ts | 164 ++--- lib/upload/uploader/UploadFile.ts | 225 +++++++ lib/upload/uploader/UploadFileTree.ts | 206 +++++++ lib/upload/uploader/Uploader.ts | 642 ++++---------------- lib/upload/uploader/index.ts | 15 +- lib/upload/utils/config.ts | 11 + lib/upload/utils/conflicts.ts | 34 -- lib/upload/utils/requests.spec.ts | 35 ++ lib/upload/utils/requests.ts | 31 + 13 files changed, 772 insertions(+), 672 deletions(-) create mode 100644 lib/upload/errors/UploadFailedError.spec.ts create mode 100644 lib/upload/errors/UploadFailedError.ts create mode 100644 lib/upload/uploader/UploadFile.ts create mode 100644 lib/upload/uploader/UploadFileTree.ts delete mode 100644 lib/upload/utils/conflicts.ts create mode 100644 lib/upload/utils/requests.spec.ts create mode 100644 lib/upload/utils/requests.ts diff --git a/lib/upload/errors/UploadCancelledError.ts b/lib/upload/errors/UploadCancelledError.ts index b2de557a..cce917d5 100644 --- a/lib/upload/errors/UploadCancelledError.ts +++ b/lib/upload/errors/UploadCancelledError.ts @@ -2,10 +2,11 @@ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors * SPDX-License-Identifier: AGPL-3.0-or-later */ -import { t } from '../utils/l10n.ts' export class UploadCancelledError extends Error { + private __UPLOAD_CANCELLED__ = true + public constructor(cause?: unknown) { - super(t('Upload has been cancelled'), { cause }) + super('Upload has been cancelled', { cause }) } } diff --git a/lib/upload/errors/UploadFailedError.spec.ts b/lib/upload/errors/UploadFailedError.spec.ts new file mode 100644 index 00000000..1a16aa8f --- /dev/null +++ b/lib/upload/errors/UploadFailedError.spec.ts @@ -0,0 +1,44 @@ +/*! 
+ * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { AxiosRequestHeaders, AxiosResponse } from 'axios' + +import { AxiosError } from 'axios' +import { expect, test } from 'vitest' +import { UploadFailedError } from './UploadFailedError.ts' + +test('UploadFailedError - axios error but no response', () => { + const cause = new AxiosError('Network error') + const error = new UploadFailedError(cause) + expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadFailedError) + expect(error.message).toBe('Upload has failed') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_FAILED__') + expect(error.response).toBeUndefined() +}) + +test('UploadFailedError - axios error', () => { + const response = {} as AxiosResponse + const cause = new AxiosError('Network error', '200', { headers: {} as AxiosRequestHeaders }, {}, response) + const error = new UploadFailedError(cause) + expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadFailedError) + expect(error.message).toBe('Upload has failed') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_FAILED__') + expect(error.response).toBe(response) +}) + +test('UploadFailedError - generic error', () => { + const cause = new Error('Generic error') + const error = new UploadFailedError(cause) + expect(error).toBeInstanceOf(Error) + expect(error).toBeInstanceOf(UploadFailedError) + expect(error.message).toBe('Upload has failed') + expect(error.cause).toBe(cause) + expect(error).toHaveProperty('__UPLOAD_FAILED__') + expect(error.response).toBeUndefined() +}) diff --git a/lib/upload/errors/UploadFailedError.ts b/lib/upload/errors/UploadFailedError.ts new file mode 100644 index 00000000..e85b3f78 --- /dev/null +++ b/lib/upload/errors/UploadFailedError.ts @@ -0,0 +1,21 @@ +/*! 
+ * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type { AxiosResponse } from '@nextcloud/axios' + +import { isAxiosError } from '@nextcloud/axios' + +export class UploadFailedError extends Error { + private __UPLOAD_FAILED__ = true + + readonly response?: AxiosResponse + + public constructor(cause?: unknown) { + super('Upload has failed', { cause }) + if (isAxiosError(cause) && cause.response) { + this.response = cause.response + } + } +} diff --git a/lib/upload/index.ts b/lib/upload/index.ts index 8672945e..8d749dee 100644 --- a/lib/upload/index.ts +++ b/lib/upload/index.ts @@ -3,10 +3,7 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ -export type { Eta, EtaEventsMap } from './uploader/index.ts' -export type { Directory, IDirectory } from './utils/fileTree.ts' - -export { getUploader, upload } from './getUploader.ts' -export { Upload, UploadStatus } from './uploader/Upload.ts' -export { EtaStatus, Uploader, UploaderStatus } from './uploader/index.ts' -export { getConflicts, hasConflict } from './utils/conflicts.ts' +export { UploadCancelledError } from './errors/UploadCancelledError.ts' +export { UploadFailedError } from './errors/UploadFailedError.ts' +export * from './uploader/index.ts' +export { getUploader } from './getUploader.ts' diff --git a/lib/upload/uploader/Upload.ts b/lib/upload/uploader/Upload.ts index fa0608d3..9a27f1bc 100644 --- a/lib/upload/uploader/Upload.ts +++ b/lib/upload/uploader/Upload.ts @@ -3,125 +3,89 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ -import type { AxiosResponse } from 'axios' +import type PQueue from 'p-queue' -import { getMaxChunksSize } from '../utils/config.ts' +import { TypedEventTarget } from 'typescript-event-target' export const UploadStatus = Object.freeze({ + /** The upload was initialized */ INITIALIZED: 0, - UPLOADING: 1, - ASSEMBLING: 2, - FINISHED: 3, - CANCELLED: 4, - FAILED: 5, + /** The upload was scheduled but is 
not yet uploading */ + SCHEDULED: 1, + /** The upload itself is running */ + UPLOADING: 2, + /** Chunks are being assembled */ + ASSEMBLING: 3, + /** The upload finished successfully */ + FINISHED: 4, + /** The upload was cancelled by the user */ + CANCELLED: 5, + /** The upload failed */ + FAILED: 6, }) -type TUploadStatus = typeof UploadStatus[keyof typeof UploadStatus] +export type TUploadStatus = typeof UploadStatus[keyof typeof UploadStatus] -export class Upload { - private _source: string - private _file: File - private _isChunked: boolean - private _chunks: number - - private _size: number - private _uploaded = 0 - private _startTime = 0 - - private _status: TUploadStatus = UploadStatus.INITIALIZED - private _controller: AbortController - private _response: AxiosResponse | null = null - - constructor(source: string, chunked = false, size: number, file: File) { - const chunks = Math.min(getMaxChunksSize() > 0 ? Math.ceil(size / getMaxChunksSize()) : 1, 10000) - this._source = source - this._isChunked = chunked && getMaxChunksSize() > 0 && chunks > 1 - this._chunks = this._isChunked ? 
chunks : 1 - this._size = size - this._file = file - this._controller = new AbortController() - } - - get source(): string { - return this._source - } - - get file(): File { - return this._file - } - - get isChunked(): boolean { - return this._isChunked - } - - get chunks(): number { - return this._chunks - } - - get size(): number { - return this._size - } - - get startTime(): number { - return this._startTime - } - - set response(response: AxiosResponse | null) { - this._response = response - } - - get response(): AxiosResponse | null { - return this._response - } - - get uploaded(): number { - return this._uploaded - } +interface UploadEvents { + finished: CustomEvent + progress: CustomEvent +} +export interface IUpload extends TypedEventTarget { /** - * Update the uploaded bytes of this upload + * The source of the upload */ - set uploaded(length: number) { - if (length >= this._size) { - this._status = this._isChunked - ? UploadStatus.ASSEMBLING - : UploadStatus.FINISHED - this._uploaded = this._size - return - } - - this._status = UploadStatus.UPLOADING - this._uploaded = length - - // If first progress, let's log the start time - if (this._startTime === 0) { - this._startTime = new Date().getTime() - } - } - - get status(): TUploadStatus { - return this._status - } - + readonly source: string /** - * Update this upload status + * Whether the upload is chunked or not */ - set status(status: TUploadStatus) { - this._status = status - } + readonly isChunked: boolean + /** + * The total size of the upload in bytes + */ + readonly totalBytes: number + /** + * Timestamp of when the upload started. + * Will return `undefined` if the upload has not started yet. 
+ */ + readonly startTime?: number + /** + * The number of bytes that have been uploaded so far + */ + readonly uploadedBytes: number + /** + * The current status of the upload + */ + readonly status: TUploadStatus + /** + * The internal abort signal + */ + readonly signal: AbortSignal /** - * Returns the axios cancel token source + * Cancels the upload */ + cancel(): void +} + +export abstract class Upload extends TypedEventTarget implements Partial { + #abortController = new AbortController() + get signal(): AbortSignal { - return this._controller.signal + return this.#abortController.signal } /** - * Cancel any ongoing requests linked to this upload + * Cancels the upload */ - cancel() { - this._controller.abort() - this._status = UploadStatus.CANCELLED + public cancel(): void { + this.#abortController.abort() } + + /** + * Start the upload + * + * @param queue - The job queue. It is used to limit the number of concurrent upload jobs. + */ + public abstract start(queue: PQueue): Promise } diff --git a/lib/upload/uploader/UploadFile.ts b/lib/upload/uploader/UploadFile.ts new file mode 100644 index 00000000..2dbe68a8 --- /dev/null +++ b/lib/upload/uploader/UploadFile.ts @@ -0,0 +1,225 @@ +/*! 
+ * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type PQueue from 'p-queue' +import type { IUpload, TUploadStatus } from './Upload.ts' + +import axios from '@nextcloud/axios' +import { getCapabilities } from '@nextcloud/capabilities' +import { join } from '@nextcloud/paths' +import { isPublicShare } from '@nextcloud/sharing/public' +import { UploadCancelledError } from '../errors/UploadCancelledError.ts' +import { UploadFailedError } from '../errors/UploadFailedError.ts' +import { getMaxChunksSize } from '../utils/config.ts' +import { getMtimeHeader, isRequestAborted } from '../utils/requests.ts' +import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' +import { Upload, UploadStatus } from './Upload.ts' + +/** + * Shared state to determine if the server supports chunking for public shares. + */ +let supportsPublicChunking: boolean | undefined + +/** + * A class representing a single file to be uploaded + */ +export class UploadFile extends Upload implements IUpload { + #customHeaders: Record + #fileHandle: File | FileSystemFileEntry + #file?: File + #noChunking: boolean + + public source: string + public status: TUploadStatus = UploadStatus.INITIALIZED + public startTime?: number + public totalBytes: number = 0 + public uploadedBytes: number = -1 + public numberOfChunks: number = 1 + + constructor( + destination: string, + fileHandle: File | FileSystemFileEntry, + options: { headers?: Record, noChunking?: boolean }, + ) { + super() + const { + headers = {}, + noChunking = false, + } = options + + // exposed state + this.source = destination + this.totalBytes = 'size' in fileHandle ? 
fileHandle.size : -1 + + // private state + this.#fileHandle = fileHandle + this.#customHeaders = headers + this.#noChunking = noChunking + this.signal.addEventListener('abort', () => { + if (this.status !== UploadStatus.FAILED) { + this.status = UploadStatus.CANCELLED + } + }) + } + + get isChunked(): boolean { + if (supportsPublicChunking === undefined) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + supportsPublicChunking = (getCapabilities() as Record).dav?.public_shares_chunking ?? false + } + + const maxChunkSize = getMaxChunksSize('size' in this.#fileHandle ? this.#fileHandle.size : undefined) + return !this.#noChunking + && maxChunkSize > 0 + && this.totalBytes >= maxChunkSize + && (supportsPublicChunking || !isPublicShare()) + } + + async start(queue: PQueue): Promise { + if (this.status !== UploadStatus.INITIALIZED) { + throw new Error('Upload already started') + } + + this.startTime = Date.now() + this.#file = await getFile(this.#fileHandle) + this.totalBytes = this.#file.size + this.uploadedBytes = 0 + this.status = UploadStatus.SCHEDULED + + try { + if (this.isChunked) { + await this.#uploadChunked(queue) + } else { + queue.add(() => this.#upload()) + } + } catch (error) { + this.cancel() + if (error instanceof UploadCancelledError || error instanceof UploadFailedError) { + throw error + } + this.status = UploadStatus.FAILED + throw new UploadFailedError(error) + } + } + + /** + * Internal implementation of the upload process for non-chunked uploads. + */ + async #upload() { + this.status = UploadStatus.UPLOADING + const chunk = await getChunk(this.#file!, 0, this.#file!.size) + try { + await this.#uploadChunk(chunk, this.source) + } finally { + this.dispatchTypedEvent('finished', new CustomEvent('finished', { detail: this })) + } + } + + /** + * Internal implementation of the upload process for chunked uploads. 
+ * + * @param queue - The job queue to throttle number of concurrent chunk uploads + */ + async #uploadChunked(queue: PQueue) { + this.status = UploadStatus.UPLOADING + const temporaryUrl = await initChunkWorkspace(this.source, 5, isPublicShare(), this.#customHeaders) + + const promises: Promise[] = [] + this.numberOfChunks = Math.ceil(this.totalBytes / getMaxChunksSize(this.totalBytes)) + for (let i = 0; i < this.numberOfChunks; i++) { + const chunk = await getChunk(this.#file!, i * getMaxChunksSize(this.totalBytes), (i + 1) * getMaxChunksSize(this.totalBytes)) + promises.push(queue.add(() => this.#uploadChunk(chunk, join(temporaryUrl, String(i))))) + } + this.status = UploadStatus.UPLOADING + + queue.add(async () => { + try { + await Promise.all(promises) + // Send the assemble request + this.status = UploadStatus.ASSEMBLING + await queue.add(async () => { + await axios.request({ + method: 'MOVE', + url: `${temporaryUrl}/.file`, + headers: { + ...this.#customHeaders, + ...getMtimeHeader(this.#file!), + 'OC-Total-Length': this.#file!.size, + Destination: this.source, + }, + }) + }) + this.status = UploadStatus.FINISHED + } catch (error) { + this.cancel() + if (isRequestAborted(error)) { + this.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } + this.status = UploadStatus.FAILED + throw new UploadFailedError(error) + } finally { + this.dispatchTypedEvent('finished', new CustomEvent('finished', { detail: this })) + } + }) + } + + /** + * Internal helper to share logic for uploading a chunk of data for both chunked and non-chunked uploads. + * + * @param chunk - The chunk to upload + * @param url - The target URL + */ + async #uploadChunk(chunk: Blob, url: string) { + try { + await uploadData( + url, + chunk, + { + signal: this.signal, + onUploadProgress: ({ bytes }) => { + // As this is only the sent bytes not the processed ones we only count 90%. 
+ // When the upload is finished (server acknowledged the upload) the remaining 10% will be correctly set. + this.uploadedBytes += bytes * 0.9 + this.dispatchTypedEvent('progress', new CustomEvent('progress', { detail: this })) + }, + onUploadRetry: () => { + this.uploadedBytes = 0 + }, + headers: { + ...this.#customHeaders, + ...getMtimeHeader(this.#file!), + 'Content-Type': this.#file!.type, + }, + }, + ) + + // Update progress - now we set the uploaded size to 100% of the file size + this.uploadedBytes = this.totalBytes + this.status = UploadStatus.FINISHED + } catch (error) { + if (isRequestAborted(error)) { + this.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } + + this.status = UploadStatus.FAILED + throw new UploadFailedError(error) + } + } +} + +/** + * Converts a FileSystemFileEntry to a File if needed and returns it. + * + * @param fileHandle - The file handle + */ +async function getFile(fileHandle: File | FileSystemFileEntry): Promise { + if (fileHandle instanceof File) { + return fileHandle + } + + return await new Promise((resolve, reject) => fileHandle.file(resolve, reject)) +} diff --git a/lib/upload/uploader/UploadFileTree.ts b/lib/upload/uploader/UploadFileTree.ts new file mode 100644 index 00000000..8d808a50 --- /dev/null +++ b/lib/upload/uploader/UploadFileTree.ts @@ -0,0 +1,206 @@ +/*! 
+ * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import type PQueue from 'p-queue' +import type { IUpload, TUploadStatus } from './Upload.ts' + +import axios, { isAxiosError } from '@nextcloud/axios' +import { basename, join } from '@nextcloud/paths' +import { UploadCancelledError } from '../errors/UploadCancelledError.ts' +import { UploadFailedError } from '../errors/UploadFailedError.ts' +import { Directory as FileTree } from '../utils/fileTree.ts' +import { getMtimeHeader, isRequestAborted } from '../utils/requests.ts' +import { Upload, UploadStatus } from './Upload.ts' +import { UploadFile } from './UploadFile.ts' + +/** + * Callback type for conflict resolution when uploading a folder tree. + * + * The callback receives the nodes in the current folder and the current path to upload to, + * it should return a list of nodes that should be uploaded (e.g. after resolving conflicts by renaming or selecting which files to upload). + * In case the upload should be cancelled, it should return `false`. + * The returned mapping allows resolving conflicts by renaming files or folders before upload, + * the key is the original name of the node and the value is the name to upload it as. 
+ * + * @param nodes - The nodes to upload (list of filenames) + * @param currentPath - The current path to upload to + * @return A promise that resolves to a list of nodes that should be uploaded or false if the upload should be cancelled + */ +export type ConflictsCallback = (nodes: string[], currentPath: string) => Promise> + +/** + * A class representing a folder tree to be uploaded + */ +export class UploadFileTree extends Upload implements IUpload { + /** Custom headers passed */ + #customHeaders: Record + /** The current file tree to upload */ + #directory: FileTree + /** Whether chunking is disabled */ + #noChunking: boolean + /** Children uploads of this parent folder upload */ + #children: (Upload & IUpload)[] = [] + /** The callback to handle conflicts */ + #conflictsCallback?: ConflictsCallback + + /** Whether we need to check for conflicts or not (newly created parent folders = no conflict resolution needed) */ + protected needConflictResolution = true + + public source: string + public status: TUploadStatus = UploadStatus.INITIALIZED + public startTime?: number + public totalBytes: number = 0 + public uploadedBytes: number = -1 + + constructor( + destination: string, + directory: FileTree, + options: { + callback?: ConflictsCallback + headers?: Record + noChunking?: boolean + }, + ) { + super() + const { + headers = {}, + noChunking = false, + } = options + + // exposed state + this.source = destination + this.#directory = directory + this.#customHeaders = headers + this.#noChunking = noChunking + + this.signal.addEventListener('abort', () => { + for (const child of this.#children) { + child.cancel() + } + if (this.status !== UploadStatus.FAILED) { + this.status = UploadStatus.CANCELLED + } + }) + } + + get isChunked(): boolean { + return false + } + + /** + * Set up all child uploads for this upload tree. 
+ */ + initialize(): (Upload & IUpload)[] { + for (const child of this.#directory.children) { + if (child instanceof FileTree) { + const upload = new UploadFileTree( + join(this.source, child.originalName), + child, + { + callback: this.#conflictsCallback, + headers: this.#customHeaders, + noChunking: this.#noChunking, + }, + ) + this.#children.push(upload, ...upload.initialize()) + } else { + const upload = new UploadFile( + join(this.source, child.name), + child, + { headers: this.#customHeaders, noChunking: this.#noChunking }, + ) + this.#children.push(upload) + } + } + return this.#children + } + + async start(queue: PQueue): Promise { + if (this.status !== UploadStatus.INITIALIZED) { + throw new Error('Upload already started') + } + this.status = UploadStatus.SCHEDULED + this.startTime = Date.now() + this.uploadedBytes = 0 + + this.status = UploadStatus.UPLOADING + await this.#createDirectory(queue) + if (this.needConflictResolution && this.#conflictsCallback) { + const nodes = await this.#conflictsCallback( + this.#directory.children.map((node) => basename(node.name)), + this.source, + ) + if (nodes === false) { + this.cancel() + return + } + + for (const [originalName, newName] of Object.entries(nodes)) { + const upload = this.#children.find((child) => basename(child.source) === originalName) + if (upload) { + Object.defineProperty(upload, 'source', { value: join(this.source, newName) }) + } + } + } + + const uploads: Promise[] = [] + for (const upload of this.#children) { + uploads.push(upload.start(queue)) + // for folder tree uploads store the conflict resolution state to prevent useless requests + if (upload instanceof UploadFileTree) { + upload.needConflictResolution = this.needConflictResolution + } + } + + try { + await Promise.all(uploads) + this.status = UploadStatus.FINISHED + } catch (error) { + this.cancel() + if (isRequestAborted(error)) { + this.status = UploadStatus.CANCELLED + throw new UploadCancelledError(error) + } else if (error 
instanceof UploadCancelledError) { + this.status = UploadStatus.CANCELLED + throw error + } else if (error instanceof UploadFailedError) { + this.status = UploadStatus.FAILED + throw error + } + } finally { + this.dispatchTypedEvent('finished', new CustomEvent('finished', { detail: this })) + } + } + + /** + * Helper to create the directory for this tree. + * + * @param queue - The job queue + */ + async #createDirectory(queue: PQueue): Promise { + await queue.add(async () => { + try { + await axios.request({ + method: 'MKCOL', + url: this.source, + headers: { + ...this.#customHeaders, + ...getMtimeHeader(this.#directory), + }, + signal: this.signal, + }) + // MKCOL worked so this is a new directory, no conflict resolution needed + this.needConflictResolution = false + } catch (error) { + // ignore 405 Method Not Allowed as it means the directory already exists and we can continue with uploading the children + if (isAxiosError(error) && error.response?.status === 405) { + this.needConflictResolution = true + return + } + throw error + } + }) + } +} diff --git a/lib/upload/uploader/Uploader.ts b/lib/upload/uploader/Uploader.ts index 14efbfd0..f30e38aa 100644 --- a/lib/upload/uploader/Uploader.ts +++ b/lib/upload/uploader/Uploader.ts @@ -3,29 +3,23 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ -import type { AxiosError, AxiosResponse } from 'axios' -import type { WebDAVClient } from 'webdav' import type { IFolder } from '../../node/folder.ts' -import type { IDirectory } from '../utils/fileTree.ts' +import type { IUpload } from './Upload.ts' +import type { ConflictsCallback } from './UploadFileTree.ts' import { getCurrentUser } from '@nextcloud/auth' -import axios, { isCancel } from '@nextcloud/axios' -import { getCapabilities } from '@nextcloud/capabilities' -import { encodePath } from '@nextcloud/paths' import PQueue from 'p-queue' -import { normalize } from 'path' -import { defaultRemoteURL, defaultRootPath, getClient } from '../../dav/dav.ts' +import { 
TypedEventTarget } from 'typescript-event-target' +import { defaultRemoteURL, defaultRootPath } from '../../dav/dav.ts' import { FileType, Folder } from '../../node/index.ts' import { Permission } from '../../permissions.ts' import logger from '../../utils/logger.ts' -import { UploadCancelledError } from '../errors/UploadCancelledError.ts' -import { getMaxChunksSize } from '../utils/config.ts' -import { isFileSystemFileEntry } from '../utils/filesystem.ts' +import { getMaxChunksSize, getMaxParallelUploads } from '../utils/config.ts' import { Directory } from '../utils/fileTree.ts' -import { t } from '../utils/l10n.ts' -import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' import { Eta } from './Eta.ts' -import { Upload, UploadStatus } from './Upload.ts' +import { UploadStatus } from './Upload.ts' +import { UploadFile } from './UploadFile.ts' +import { UploadFileTree } from './UploadFileTree.ts' export const UploaderStatus = Object.freeze({ IDLE: 0, @@ -44,7 +38,8 @@ interface BaseOptions { interface UploadOptions extends BaseOptions { /** - * The root folder where to upload + * The root folder where to upload. 
+ * Allows to override the current root of the uploader for this upload */ root?: string @@ -56,38 +51,27 @@ interface UploadOptions extends BaseOptions { retries?: number } -interface DirectoryUploadOptions extends BaseOptions { - destination: string - directory: Directory - client: WebDAVClient +interface BatchUploadOptions extends UploadOptions { + callback?: ConflictsCallback } -interface BatchUploadOptions extends BaseOptions { - callback?: (nodes: Array, currentPath: string) => Promise | false> +interface UploaderEventsMap { + paused: CustomEvent + resumed: CustomEvent + progress: CustomEvent + finished: CustomEvent } -export class Uploader { - // Initialized via setter in the constructor - private _destinationFolder!: IFolder - private _isPublic: boolean - private _customHeaders: Record - - // Global upload queue - private _uploadQueue: Array = [] - private _jobQueue: PQueue = new PQueue({ - // Maximum number of concurrent uploads - // @ts-expect-error TS2339 Object has no defined properties - concurrency: getCapabilities().files?.chunked_upload?.max_parallel_count ?? 
5, +export class Uploader extends TypedEventTarget { + #eta = new Eta() + #destinationFolder: IFolder + #customHeaders: Map = new Map() + #status: TUploaderStatus = UploaderStatus.IDLE + #uploadQueue: IUpload[] = [] + #jobQueue: PQueue = new PQueue({ + concurrency: getMaxParallelUploads(), }) - private _queueSize = 0 - private _queueProgress = 0 - private _queueStatus: TUploaderStatus = UploaderStatus.IDLE - - private _eta = new Eta() - - private _notifiers: Array<(upload: Upload) => void> = [] - /** * Initialize uploader * @@ -98,9 +82,7 @@ export class Uploader { isPublic = false, destinationFolder?: IFolder, ) { - this._isPublic = isPublic - this._customHeaders = {} - + super() if (!destinationFolder) { const source = `${defaultRemoteURL}${defaultRootPath}` let owner: string @@ -123,47 +105,43 @@ export class Uploader { source, }) } - this.destination = destinationFolder + this.#destinationFolder = destinationFolder logger.debug('Upload workspace initialized', { - destination: this.destination, - root: this.root, + root: this.#destinationFolder.source, isPublic, maxChunksSize: getMaxChunksSize(), }) } + public get status(): TUploaderStatus { + return this.#status + } + /** - * Get the upload destination path relative to the root folder + * Get the upload destination folder */ - get destination(): IFolder { - return this._destinationFolder + public get destination(): IFolder { + return this.#destinationFolder } /** * Set the upload destination path relative to the root folder */ - set destination(folder: IFolder) { + public set destination(folder: IFolder) { if (!folder || folder.type !== FileType.Folder || !folder.source) { throw new Error('Invalid destination folder') } logger.debug('Destination set', { folder }) - this._destinationFolder = folder - } - - /** - * Get the root folder - */ - get root() { - return this._destinationFolder.source + this.#destinationFolder = folder } /** * Get registered custom headers for uploads */ - get customHeaders(): Record { - 
return structuredClone(this._customHeaders) + public get customHeaders(): Map { + return structuredClone(this.#customHeaders) } /** @@ -172,8 +150,8 @@ export class Uploader { * @param name The header to set * @param value The string value */ - setCustomHeader(name: string, value: string = ''): void { - this._customHeaders[name] = value + public setCustomHeader(name: string, value: string = ''): void { + this.#customHeaders.set(name, value) } /** @@ -181,42 +159,27 @@ export class Uploader { * * @param name The header to unset */ - deleteCustomerHeader(name: string): void { - delete this._customHeaders[name] + public deleteCustomerHeader(name: string): void { + this.#customHeaders.delete(name) } /** * Get the upload queue */ - get queue(): Upload[] { - return this._uploadQueue - } - - private reset() { - // Reset the ETA - this._eta.reset() - // If there is no upload in the queue and no job in the queue - if (this._uploadQueue.length === 0 && this._jobQueue.size === 0) { - return - } - - // Reset upload queue but keep the reference - this._uploadQueue.splice(0, this._uploadQueue.length) - this._jobQueue.clear() - this._queueSize = 0 - this._queueProgress = 0 - this._queueStatus = UploaderStatus.IDLE - logger.debug('Uploader state reset') + public get queue(): IUpload[] { + return [...this.#uploadQueue] } /** - * Pause any ongoing upload(s) + * Pause the uploader. + * Already started uploads will continue, but all other (not yet started) uploads + * will be paused until `start()` is called. 
*/ - public pause() { - this._eta.pause() - this._jobQueue.pause() - this._queueStatus = UploaderStatus.PAUSED - this.updateStats() + public async pause() { + this.#jobQueue.pause() + this.#status = UploaderStatus.PAUSED + await this.#jobQueue.onPendingZero() + this.dispatchTypedEvent('paused', new CustomEvent('paused')) logger.debug('Uploader paused') } @@ -224,70 +187,22 @@ export class Uploader { * Resume any pending upload(s) */ public start() { - this._eta.resume() - this._jobQueue.start() - this._queueStatus = UploaderStatus.UPLOADING - this.updateStats() + this.#jobQueue.start() + this.#status = UploaderStatus.UPLOADING + this.dispatchTypedEvent('resumed', new CustomEvent('resumed')) logger.debug('Uploader resumed') } - /** - * Get the estimation for the uploading time. - */ - get eta(): Eta { - return this._eta - } - - /** - * Get the upload queue stats - */ - get info() { - return { - size: this._queueSize, - progress: this._queueProgress, - status: this._queueStatus, - } - } - - private updateStats() { - const size = this._uploadQueue.map((upload) => upload.size) - .reduce((partialSum, a) => partialSum + a, 0) - const uploaded = this._uploadQueue.map((upload) => upload.uploaded) - .reduce((partialSum, a) => partialSum + a, 0) - - this._eta.update(uploaded, size) - this._queueSize = size - this._queueProgress = uploaded - - // If already paused keep it that way - if (this._queueStatus !== UploaderStatus.PAUSED) { - const pending = this._uploadQueue.find(({ status }) => ([UploadStatus.INITIALIZED, UploadStatus.UPLOADING, UploadStatus.ASSEMBLING] as number[]).includes(status)) - if (this._jobQueue.size > 0 || pending) { - this._queueStatus = UploaderStatus.UPLOADING - } else { - this.eta.reset() - this._queueStatus = UploaderStatus.IDLE - } + public reset() { + for (const upload of this.#uploadQueue) { + upload.cancel() } - } - addNotifier(notifier: (upload: Upload) => void) { - this._notifiers.push(notifier) - } - - /** - * Notify listeners of the upload 
completion - * - * @param upload The upload that finished - */ - private _notifyAll(upload: Upload): void { - for (const notifier of this._notifiers) { - try { - notifier(upload) - } catch (error) { - logger.warn('Error in upload notifier', { error, source: upload.source }) - } - } + this.#uploadQueue = [] + this.#jobQueue.clear() + this.#eta.reset() + this.#status = UploaderStatus.IDLE + logger.debug('Uploader reset') } /** @@ -323,155 +238,32 @@ export class Uploader { * } * ``` */ - async batchUpload( + public async batchUpload( destination: string, files: (File | FileSystemEntry)[], options?: BatchUploadOptions, - ): Promise { + ): Promise { const rootFolder = new Directory('') await rootFolder.addChildren(files) // create a meta upload to ensure all ongoing child requests are listed - const target = `${this.root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` - const upload = new Upload(target, false, 0, rootFolder) - upload.status = UploadStatus.UPLOADING - this._uploadQueue.push(upload) - - logger.debug('Starting new batch upload', { target }) - try { - // setup client with root and custom header - const client = getClient(this.root, this._customHeaders) - // Create the promise for the virtual root directory - const promise = this.uploadDirectory({ - ...options, - destination, - directory: rootFolder, - client, - }) - // await the uploads and resolve with "finished" status - const uploads = await promise - upload.status = UploadStatus.FINISHED - return uploads - } catch (error) { - if (isCancel(error) || error instanceof UploadCancelledError || (error instanceof DOMException && error.name === 'AbortError')) { - logger.info('Upload cancelled by user', { error }) - upload.status = UploadStatus.CANCELLED - throw new UploadCancelledError(error) - } else { - logger.error('Error in batch upload', { error }) - upload.status = UploadStatus.FAILED - throw error - } - } finally { - // Upload queue is cleared when all the uploading jobs are done - // Meta upload 
unlike real uploading does not create a job - // Removing it manually here to make sure it is remove even when no uploading happened and there was nothing to finish - this._uploadQueue.splice(this._uploadQueue.indexOf(upload), 1) - this._notifyAll(upload) - this.updateStats() - } - } - - /** - * Helper to create a directory wrapped inside an Upload class - * - * @param options - the options for the directory upload - * @param options.destination Destination where to create the directory - * @param options.directory The directory to create - * @param options.client The cached WebDAV client - */ - private async createDirectory(options: DirectoryUploadOptions): Promise { - if (!options.directory.name) { - throw new Error('Can not create empty directory') - } - - const folderPath = normalize(`${options.destination}/${options.directory.name}`).replace(/\/$/, '') - const rootPath = `${this.root.replace(/\/$/, '')}/${folderPath.replace(/^\//, '')}` - - // Add a new upload to the upload queue - const currentUpload: Upload = new Upload(rootPath, false, 0, options.directory) - if (options.signal) { - options.signal.addEventListener('abort', currentUpload.cancel) - } - this._uploadQueue.push(currentUpload) - - try { - // Add the request to the job queue -> wait for finish to resolve the promise - return await this._jobQueue.add(async () => { - currentUpload.status = UploadStatus.UPLOADING - await options.client.createDirectory(folderPath, { signal: currentUpload.signal }) - return currentUpload - }) - } catch (error) { - if (isCancel(error) || error instanceof UploadCancelledError || (error instanceof DOMException && error.name === 'AbortError')) { - currentUpload.status = UploadStatus.CANCELLED - throw new UploadCancelledError(error) - } else if (error && typeof error === 'object' && 'status' in error && error.status === 405) { - // Directory already exists, so just write into it and ignore the error - logger.debug('Directory already exists, writing into it', { directory: 
options.directory.name }) - currentUpload.status = UploadStatus.FINISHED - return currentUpload - } else { - // Another error happened, so abort uploading the directory - currentUpload.status = UploadStatus.FAILED - throw error - } - } finally { - // Update statistics - this._notifyAll(currentUpload) - this.updateStats() - } - } - - // Helper for uploading directories (recursively) - private async uploadDirectory(options: BatchUploadOptions & DirectoryUploadOptions): Promise { - // we use an internal abort controller to also cancel uploads if an error happened. - // So if a signal is provided we connect it to our controller. - const internalAbortController = new AbortController() - if (options.signal) { - options.signal.addEventListener('abort', () => internalAbortController.abort()) - } - - const internalOptions = { ...options, signal: internalAbortController.signal } - const folderPath = normalize(`${internalOptions.destination}/${internalOptions.directory.name}`).replace(/\/$/, '') - - // Let the user handle conflicts - const selectedForUpload = await (internalOptions.callback?.(internalOptions.directory.children, folderPath) ?? 
internalOptions.directory.children) - if (selectedForUpload === false) { - logger.debug('Upload canceled by user', { directory: internalOptions.directory }) - throw new UploadCancelledError('Conflict resolution cancelled by user') - } else if (selectedForUpload.length === 0 && internalOptions.directory.children.length > 0) { - logger.debug('Skipping directory, as all files were skipped by user', { directory: internalOptions.directory }) - return [] + const target = `${this.destination.source.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` + const headers = Object.fromEntries(this.#customHeaders.entries()) + const upload = new UploadFileTree( + target, + rootFolder, + { ...options, headers }, + ) + if (options?.signal) { + options.signal.addEventListener('abort', upload.cancel) } - logger.debug('Start directory upload', { directory: internalOptions.directory }) - const directories: Promise[] = [] - const uploads: Promise[] = [] - try { - if (internalOptions.directory.name) { - // If not the virtual root we need to create the directory first before uploading - // Make sure the promise is listed in the final result - uploads.push(this.createDirectory(internalOptions)) - // Ensure the directory is created before uploading / creating children - await uploads.at(-1) - } - - for (const node of selectedForUpload) { - if (node instanceof Directory) { - directories.push(this.uploadDirectory({ ...internalOptions, directory: node })) - } else { - uploads.push(this.upload(`${folderPath}/${node.name}`, node, { signal: internalOptions.signal })) - } - } - - const resolvedUploads = await Promise.all(uploads) - const resolvedDirectoryUploads = await Promise.all(directories) - return [resolvedUploads, ...resolvedDirectoryUploads].flat() - } catch (e) { - // Ensure a failure cancels all other requests - internalAbortController.abort() - throw e + const uploads = [...upload.initialize(), upload] + for (const upload of uploads) { + this.#attachEventListeners(upload) } + 
this.#uploadQueue.push(...uploads) + await upload.start(this.#jobQueue) + return uploads } /** @@ -481,247 +273,59 @@ export class Uploader { * @param fileHandle - The file to upload * @param options - Optional parameters */ - async upload(destination: string, fileHandle: File | FileSystemFileEntry, options?: UploadOptions): Promise { - const root = options?.root ?? this.root - const destinationPath = `${root.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` - - // Get the encoded source url to this object for requests purposes - const { origin } = new URL(destinationPath) - const encodedDestinationFile = origin + encodePath(destinationPath.slice(origin.length)) - - this.eta.resume() - logger.debug(`Uploading ${fileHandle.name} to ${encodedDestinationFile}`) - - // Handle file system entries by retrieving the file handle - if (isFileSystemFileEntry(fileHandle)) { - fileHandle = await new Promise((resolve, reject) => (fileHandle as FileSystemFileEntry).file(resolve, reject)) - } - // We can cast here as we handled system entries in the if above - const file = fileHandle as File - - // @ts-expect-error TS2339 Object has no defined properties - const supportsPublicChunking = getCapabilities().dav?.public_shares_chunking ?? false - const maxChunkSize = getMaxChunksSize('size' in file ? 
file.size : undefined) - // If manually disabled or if the file is too small - const disabledChunkUpload = (this._isPublic && !supportsPublicChunking) - || maxChunkSize === 0 - || ('size' in file && file.size < maxChunkSize) - - const upload = new Upload(destinationPath, !disabledChunkUpload, file.size, file) - this._uploadQueue.push(upload) - this.updateStats() - - // Register cancellation caller + public async upload(destination: string, fileHandle: File | FileSystemFileEntry, options?: UploadOptions): Promise { + const target = `${this.destination.source.replace(/\/$/, '')}/${destination.replace(/^\//, '')}` + const headers = Object.fromEntries(this.#customHeaders.entries()) + const upload = new UploadFile(target, fileHandle, { ...options, headers }) if (options?.signal) { options.signal.addEventListener('abort', upload.cancel) } - const retries = options?.retries ?? 5 - if (!disabledChunkUpload) { - logger.debug('Initializing chunked upload', { file, upload }) - - // Let's initialize a chunk upload - const tempUrl = await initChunkWorkspace(encodedDestinationFile, retries, this._isPublic, this._customHeaders) - const chunksQueue: Array> = [] - - // Generate chunks array - for (let chunk = 0; chunk < upload.chunks; chunk++) { - const bufferStart = chunk * maxChunkSize - // Don't go further than the file size - const bufferEnd = Math.min(bufferStart + maxChunkSize, upload.size) - // Make it a Promise function for better memory management - const blob = () => getChunk(file, bufferStart, maxChunkSize) - - // Init request queue - const request = () => { - // bytes uploaded on this chunk (as upload.uploaded tracks all chunks) - let chunkBytes = 0 - return uploadData( - `${tempUrl}/${chunk + 1}`, - blob, - { - signal: upload.signal, - destinationFile: encodedDestinationFile, - retries, - onUploadProgress: ({ bytes }) => { - // Only count 90% of bytes as the request is not yet processed by server - // we set the remaining 10% when the request finished (server 
responded). - const progressBytes = bytes * 0.9 - chunkBytes += progressBytes - upload.uploaded += progressBytes - this.updateStats() - }, - onUploadRetry: () => { - // Current try failed, so reset the stats for this chunk - // meaning remove the uploaded chunk bytes from stats - upload.uploaded -= chunkBytes - chunkBytes = 0 - this.updateStats() - }, - headers: { - ...this._customHeaders, - ...this._mtimeHeader(file), - 'OC-Total-Length': file.size, - 'Content-Type': 'application/octet-stream', - }, - }, - ) - // Update upload progress on chunk completion - .then(() => { - // request fully done so we uploaded the full chunk - // we first remove the intermediate chunkBytes from progress events - // and then add the real full size - upload.uploaded += bufferEnd - bufferStart - chunkBytes - this.updateStats() - }) - .catch((error) => { - if (error?.response?.status === 507) { - logger.error('Upload failed, not enough space on the server or quota exceeded. Cancelling the remaining chunks', { error, upload }) - upload.cancel() - upload.status = UploadStatus.FAILED - throw error - } - - if (!isCancel(error)) { - logger.error(`Chunk ${chunk + 1} ${bufferStart} - ${bufferEnd} uploading failed`, { error, upload }) - upload.cancel() - upload.status = UploadStatus.FAILED - } - throw error - }) - } - chunksQueue.push(this._jobQueue.add(request)) - } + this.#attachEventListeners(upload) + this.#uploadQueue.push(upload) + await upload.start(this.#jobQueue) + return upload + } - const request = async () => { - try { - // Once all chunks are sent, assemble the final file - await Promise.all(chunksQueue) - - // Assemble the chunks - upload.status = UploadStatus.ASSEMBLING - this.updateStats() - - // Send the assemble request - upload.response = await axios.request({ - method: 'MOVE', - url: `${tempUrl}/.file`, - headers: { - ...this._customHeaders, - ...this._mtimeHeader(file), - 'OC-Total-Length': file.size, - Destination: encodedDestinationFile, - }, - }) - upload.status = 
UploadStatus.FINISHED - this.updateStats() - - logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) - return upload - } catch (error) { - // Cleaning up temp directory - axios.request({ - method: 'DELETE', - url: `${tempUrl}`, - }) - - if (isCancel(error) || error instanceof UploadCancelledError) { - upload.status = UploadStatus.CANCELLED - throw new UploadCancelledError(error) - } else { - upload.status = UploadStatus.FAILED - throw new Error(t('Failed to assemble the chunks together')) - } - } finally { - // Notify listeners of the upload completion - this._notifyAll(upload) - } - } + /** + * Handle the progress event of an upload. + * Update the ETA and dispatch a progress event for the uploader. + */ + #onProgress() { + const totalBytes = this.#uploadQueue.reduce((acc, upload) => acc + upload.totalBytes, 0) + const uploadedBytes = this.#uploadQueue.reduce((acc, upload) => acc + upload.uploadedBytes, 0) + this.#eta.update(uploadedBytes, totalBytes) + this.dispatchTypedEvent('progress', new CustomEvent('progress')) + } - this._jobQueue.add(request) - } else { - logger.debug('Initializing regular upload', { file, upload }) - - // Generating upload limit - const blob = await getChunk(file, 0, upload.size) - const request = async () => { - try { - upload.response = await uploadData( - encodedDestinationFile, - blob, - { - signal: upload.signal, - onUploadProgress: ({ bytes }) => { - // As this is only the sent bytes not the processed ones we only count 90%. - // When the upload is finished (server acknowledged the upload) the remaining 10% will be correctly set. 
- upload.uploaded += bytes * 0.9 - this.updateStats() - }, - onUploadRetry: () => { - upload.uploaded = 0 - this.updateStats() - }, - headers: { - ...this._customHeaders, - ...this._mtimeHeader(file), - 'Content-Type': file.type, - }, - }, - ) - - // Update progress - now we set the uploaded size to 100% of the file size - upload.uploaded = upload.size - this.updateStats() - - // Resolve - logger.debug(`Successfully uploaded ${file.name}`, { file, upload }) - return upload - } catch (error) { - if (isCancel(error) || error instanceof UploadCancelledError) { - upload.status = UploadStatus.CANCELLED - throw new UploadCancelledError(error) - } - - // Attach response to the upload object - if ((error as AxiosError)?.response) { - upload.response = (error as AxiosError).response as AxiosResponse - } - - upload.status = UploadStatus.FAILED - logger.error(`Failed uploading ${file.name}`, { error, file, upload }) - throw new Error(t('Failed to upload the file')) - } finally { - // Notify listeners of the upload completion - this._notifyAll(upload) - } - } - this._jobQueue.add(request) + /** + * Handle the finished event of an upload. + * + * 1. Update the progress + * 2. if all uploads are finished dispatch a finished event for the uploader and clear the queue + */ + async #onFinished() { + this.#onProgress() + + const finalStates = [ + UploadStatus.FINISHED, + UploadStatus.CANCELLED, + UploadStatus.FAILED, + ] as number[] + if (this.#uploadQueue.every((upload) => finalStates.includes(upload.status))) { + await this.#jobQueue.onIdle() + this.dispatchTypedEvent('finished', new CustomEvent('finished')) + this.reset() } - - // Reset when upload queue is done - // Only when we know we're closing on the last chunks - // and/or assembling we can reset the uploader. - // Otherwise he queue might be idle for a short time - // and clear the Upload queue before we're done. 
- this._jobQueue.onIdle() - .then(() => this.reset()) - - // Finally return the Upload - return upload } /** - * Create modification time headers if valid value is available. - * It can be invalid on Android devices if SD cards with NTFS / FAT are used, - * as those files might use the NT epoch for time so the value will be negative. + * Attach progress listeners to an upload. * - * @param file The file to upload + * @param upload - The upload to attach listeners to */ - private _mtimeHeader(file: File): { 'X-OC-Mtime'?: number } { - const mtime = Math.floor(file.lastModified / 1000) - if (mtime > 0) { - return { 'X-OC-Mtime': mtime } - } - return {} + #attachEventListeners(upload: IUpload) { + upload.addEventListener('progress', this.#onProgress) + upload.addEventListener('finished', this.#onFinished) } } diff --git a/lib/upload/uploader/index.ts b/lib/upload/uploader/index.ts index 13de3ad6..3f4f94d2 100644 --- a/lib/upload/uploader/index.ts +++ b/lib/upload/uploader/index.ts @@ -3,14 +3,9 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ -export { - type Eta, - type EtaEventsMap, +export type { Eta } from './Eta.ts' +export type { IUpload } from './Upload.ts' - EtaStatus, -} from './Eta.ts' - -export { - Uploader, - UploaderStatus, -} from './Uploader.ts' +export { EtaStatus } from './Eta.ts' +export { UploadStatus } from './Upload.ts' +export { Uploader, UploaderStatus } from './Uploader.ts' diff --git a/lib/upload/utils/config.ts b/lib/upload/utils/config.ts index 511460a6..1210f023 100644 --- a/lib/upload/utils/config.ts +++ b/lib/upload/utils/config.ts @@ -3,6 +3,17 @@ * SPDX-License-Identifier: AGPL-3.0-or-later */ +import { getCapabilities } from '@nextcloud/capabilities' + +/** + * Get the maximum number of parallel uploads based on the server configuration. 
+ */ +export function getMaxParallelUploads(): number { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const capabilities = getCapabilities() as Record + return capabilities.files?.chunked_upload?.max_parallel_count ?? 5 +} + /** * Get the maximum chunk size for chunked uploads based on the server configuration and file size. * diff --git a/lib/upload/utils/conflicts.ts b/lib/upload/utils/conflicts.ts deleted file mode 100644 index 2d4e5adb..00000000 --- a/lib/upload/utils/conflicts.ts +++ /dev/null @@ -1,34 +0,0 @@ -/*! - * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors - * SPDX-License-Identifier: AGPL-3.0-or-later - */ - -import type { INode } from '../../node/index.ts' - -/** - * Check if there is a conflict between two sets of files - * - * @param files the incoming files - * @param content all the existing files in the directory - * @return true if there is a conflict - */ -export function hasConflict(files: (File | FileSystemEntry | INode)[], content: INode[]): boolean { - return getConflicts(files, content).length > 0 -} - -/** - * Get the conflicts between two sets of files - * - * @param files the incoming files - * @param content all the existing files in the directory - * @return true if there is a conflict - */ -export function getConflicts(files: T[], content: INode[]): T[] { - const contentNames = content.map((node: INode) => node.basename) - const conflicts = files.filter((node: File | FileSystemEntry | INode) => { - const name = 'basename' in node ? 
node.basename : node.name - return contentNames.indexOf(name) !== -1 - }) - - return conflicts -} diff --git a/lib/upload/utils/requests.spec.ts b/lib/upload/utils/requests.spec.ts new file mode 100644 index 00000000..4632d2bd --- /dev/null +++ b/lib/upload/utils/requests.spec.ts @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-or-later + * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + */ + +import { CanceledError } from 'axios' +import { expect, test } from 'vitest' +import { getMtimeHeader, isRequestAborted } from './requests.ts' + +test('getMtimeHeader - valid mtime', () => { + const file = new File([''], 'test.txt', { lastModified: 1620000000000 }) + const headers = getMtimeHeader(file) + expect(headers).toHaveProperty('X-OC-Mtime', 1620000000) +}) + +test('getMtimeHeader - invalid mtime', () => { + const file = new File([''], 'test.txt', { lastModified: -1000 }) + const headers = getMtimeHeader(file) + expect(headers).not.toHaveProperty('X-OC-Mtime') +}) + +test('isRequestAborted - axios cancel error', () => { + const error = new CanceledError('Upload cancelled') + expect(isRequestAborted(error)).toBe(true) +}) + +test('isRequestAborted - DOMException with AbortError name', () => { + const error = new DOMException('Upload cancelled', 'AbortError') + expect(isRequestAborted(error)).toBe(true) +}) + +test('isRequestAborted - other error', () => { + const error = new Error('Some other error') + expect(isRequestAborted(error)).toBe(false) +}) diff --git a/lib/upload/utils/requests.ts b/lib/upload/utils/requests.ts new file mode 100644 index 00000000..36fe4e17 --- /dev/null +++ b/lib/upload/utils/requests.ts @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-or-later + * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + */ + +import { isCancel } from '@nextcloud/axios' + +/** + * Create modification time headers if valid value is available. 
+ * It can be invalid on Android devices if SD cards with NTFS / FAT are used, + * as those files might use the NT epoch for time so the value will be negative. + * + * @param file - The file to upload + */ +export function getMtimeHeader(file: File): { 'X-OC-Mtime'?: number } { + const mtime = Math.floor(file.lastModified / 1000) + if (mtime > 0) { + return { 'X-OC-Mtime': mtime } + } + return {} +} + +/** + * Check if the given error is an abort error + * + * @param error - Error to check + */ +export function isRequestAborted(error: unknown): boolean { + return isCancel(error) + || (error instanceof DOMException && error.name === 'AbortError') +} From 4cbf1efa2b6ef11c119f6ba7706ae5e1b66f3f0d Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Thu, 19 Feb 2026 01:40:17 +0100 Subject: [PATCH 11/14] refactor(upload): remove localization - this is pure API Signed-off-by: Ferdinand Thiessen --- lib/upload/uploader/Eta.spec.ts | 16 +-------------- lib/upload/uploader/Eta.ts | 20 +----------------- lib/upload/utils/l10n.ts | 36 --------------------------------- 3 files changed, 2 insertions(+), 70 deletions(-) delete mode 100644 lib/upload/utils/l10n.ts diff --git a/lib/upload/uploader/Eta.spec.ts b/lib/upload/uploader/Eta.spec.ts index 57a46486..f5863a2c 100644 --- a/lib/upload/uploader/Eta.spec.ts +++ b/lib/upload/uploader/Eta.spec.ts @@ -11,7 +11,6 @@ describe('ETA - status', () => { const eta = new Eta() expect(eta.progress).toBe(0) expect(eta.time).toBe(Infinity) - expect(eta.timeReadable).toBe('estimating time left') expect(eta.speed).toBe(-1) expect(eta.status).toBe(EtaStatus.Idle) }) @@ -21,7 +20,6 @@ describe('ETA - status', () => { expect(eta.status).toBe(EtaStatus.Running) expect(eta.progress).toBe(0) expect(eta.time).toBe(Infinity) - expect(eta.timeReadable).toBe('estimating time left') expect(eta.speed).toBe(-1) }) @@ -82,12 +80,11 @@ describe('ETA - progress', () => { eta.add(2.5 * 1024 * 1024) expect(eta.progress).toBe(i * 2.5) 
expect(eta.speed).toBe(-1) - expect(eta.speedReadable).toBe('') expect(eta.time).toBe(Infinity) } // this is reached after (virtual) 3s with 6 * 2.5MiB (=15MiB) data of 100MiB total - expect(eta.timeReadable).toBe('estimating time left') + expect(eta.time).toBe(Infinity) // Adding another 500ms with 5MiB/s will result in enough information for estimating vi.advanceTimersByTime(500) @@ -96,7 +93,6 @@ describe('ETA - progress', () => { expect(eta.speed).toMatchInlineSnapshot('4826778') expect(eta.speedReadable).toMatchInlineSnapshot('"4.6 MB∕s"') expect(eta.time).toMatchInlineSnapshot('18') - expect(eta.timeReadable).toMatchInlineSnapshot('"18 seconds left"') // Skip forward another 4.5seconds for (let i = 0; i < 9; i++) { @@ -109,7 +105,6 @@ describe('ETA - progress', () => { expect(eta.speed).toMatchInlineSnapshot('5060836') expect(eta.speedReadable).toMatchInlineSnapshot('"4.8 MB∕s"') expect(eta.time).toMatchInlineSnapshot('12') - expect(eta.timeReadable).toMatchInlineSnapshot('"12 seconds left"') // Having a spike of 10MiB/s will not result in halfing the eta vi.advanceTimersByTime(500) @@ -120,7 +115,6 @@ describe('ETA - progress', () => { expect(eta.speedReadable).toMatchInlineSnapshot('"5 MB∕s"') // And the time has not halved expect(eta.time).toMatchInlineSnapshot('11') - expect(eta.timeReadable).toMatchInlineSnapshot('"11 seconds left"') // Add another 3 seconds so we should see 'few seconds left' for (let i = 0; i < 6; i++) { @@ -130,7 +124,6 @@ describe('ETA - progress', () => { expect(eta.progress).toBe(60) expect(eta.speed).toMatchInlineSnapshot('5344192') expect(eta.time).toMatchInlineSnapshot('8') - expect(eta.timeReadable).toMatchInlineSnapshot('"a few seconds left"') }) test('long running progress', () => { @@ -151,8 +144,6 @@ describe('ETA - progress', () => { eta.add(512 * 1024) expect(eta.progress).toBe(3.5) expect(eta.time).toBe(105) - // time is over 1 minute so we see the formatted output - 
expect(eta.timeReadable).toMatchInlineSnapshot('"00:01:45 left"') // Add another minute and we should see only seconds: for (let i = 0; i < 120; i++) { @@ -164,7 +155,6 @@ describe('ETA - progress', () => { // Now we have uploaded 63.5 MiB - so 36.5 MiB missing by having 1MiB/s upload speed we expect 37 seconds left: expect(eta.progress).toBe(63.5) expect(eta.time).toBe(37) - expect(eta.timeReadable).toMatchInlineSnapshot('"37 seconds left"') }) test('progress calculation for fast uploads', () => { @@ -180,7 +170,6 @@ describe('ETA - progress', () => { expect(eta.progress).toBe(20) expect(eta.speed).toBe(-1) expect(eta.time).toBe(Infinity) - expect(eta.timeReadable).toBe('estimating time left') // Now we have some information but not enough for normal estimation // yet we show some information as the upload is very fast (40% per second) @@ -188,7 +177,6 @@ describe('ETA - progress', () => { eta.add(20 * 1024 * 1024) expect(eta.progress).toBe(40) expect(eta.time).toBe(1.5) - expect(eta.timeReadable).toBe('a few seconds left') // still no speed information expect(eta.speed).toBe(-1) @@ -198,7 +186,6 @@ describe('ETA - progress', () => { eta.add(20 * 1024 * 1024) expect(eta.progress).toBe(40 + i * 20) expect(eta.time).toBe(1.5 - (i / 2)) - expect(eta.timeReadable).toBe('a few seconds left') // still no speed information expect(eta.speed).toBe(-1) } @@ -210,7 +197,6 @@ describe('ETA - progress', () => { expect(eta.status).toBe(EtaStatus.Running) expect(eta.progress).toBe(0) expect(eta.time).toBe(Infinity) - expect(eta.timeReadable).toBe('estimating time left') expect(eta.speed).toBe(-1) }) diff --git a/lib/upload/uploader/Eta.ts b/lib/upload/uploader/Eta.ts index cb3bd42a..b80552f7 100644 --- a/lib/upload/uploader/Eta.ts +++ b/lib/upload/uploader/Eta.ts @@ -5,7 +5,6 @@ import { TypedEventTarget } from 'typescript-event-target' import { formatFileSize } from '../../utils/fileSize.ts' -import { n, t } from '../utils/l10n.ts' export const EtaStatus = Object.freeze({ Idle: 
0, @@ -183,29 +182,12 @@ export class Eta extends TypedEventTarget { /** * Estimated time in seconds. + * If the time is not yet estimated, it will return `Infinity`. */ public get time(): number { return this._eta } - /** - * Human readable version of the estimated time. - */ - public get timeReadable(): string { - if (this._eta === Infinity) { - return t('estimating time left') - } else if (this._eta < 10) { - return t('a few seconds left') - } else if (this._eta < 60) { - return n('{seconds} seconds left', '{seconds} seconds left', this._eta, { seconds: this._eta }) - } - - const hours = String(Math.floor(this._eta / 3600)).padStart(2, '0') - const minutes = String(Math.floor((this._eta % 3600) / 60)).padStart(2, '0') - const seconds = String(this._eta % 60).padStart(2, '0') - return t('{time} left', { time: `${hours}:${minutes}:${seconds}` }) // TRANSLATORS time has the format 00:00:00 - } - /** * Transfer speed in bytes per second. * Returns `-1` if not yet estimated. diff --git a/lib/upload/utils/l10n.ts b/lib/upload/utils/l10n.ts deleted file mode 100644 index d067afb5..00000000 --- a/lib/upload/utils/l10n.ts +++ /dev/null @@ -1,36 +0,0 @@ -/** - * SPDX-FileCopyrightText: 2023 Nextcloud GmbH and Nextcloud contributors - * SPDX-License-Identifier: AGPL-3.0-or-later - */ -import { getGettextBuilder } from '@nextcloud/l10n/gettext' - -const gtBuilder = getGettextBuilder() - .detectLanguage() - -// @ts-expect-error __TRANSLATIONS__ is replaced by vite -__TRANSLATIONS__.map((data) => gtBuilder.addTranslation(data.locale, data.json)) - -interface Gettext { - /** - * Get translated string (singular form), optionally with placeholders - * - * @param original original string to translate - * @param placeholders map of placeholder key to value - */ - gettext(original: string, placeholders?: Record): string - - /** - * Get translated string with plural forms - * - * @param singular Singular text form - * @param plural Plural text form to be used if `count` requires it 
- * @param count The number to insert into the text - * @param placeholders optional map of placeholder key to value - */ - ngettext(singular: string, plural: string, count: number, placeholders?: Record): string -} - -const gt = gtBuilder.build() as Gettext - -export const n = gt.ngettext.bind(gt) as typeof gt.ngettext -export const t = gt.gettext.bind(gt) as typeof gt.gettext From 1c8e5aba9c341bb38f62d156db544ede90aa8ab1 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Thu, 19 Feb 2026 14:20:43 +0100 Subject: [PATCH 12/14] fixup! refactor(upload): split upload logic into separate classes for files and folders --- lib/upload/uploader/UploadFile.spec.ts | 198 +++++++++++++++++++++++++ lib/upload/uploader/UploadFile.ts | 19 +-- lib/upload/utils/config.ts | 8 + 3 files changed, 210 insertions(+), 15 deletions(-) create mode 100644 lib/upload/uploader/UploadFile.spec.ts diff --git a/lib/upload/uploader/UploadFile.spec.ts b/lib/upload/uploader/UploadFile.spec.ts new file mode 100644 index 00000000..8eae4c20 --- /dev/null +++ b/lib/upload/uploader/UploadFile.spec.ts @@ -0,0 +1,198 @@ +/* eslint-disable @typescript-eslint/no-unused-vars */ +/*! 
+ * SPDX-FileCopyrightText: 2026 Nextcloud GmbH and Nextcloud contributors + * SPDX-License-Identifier: AGPL-3.0-or-later + */ + +import { CanceledError } from 'axios' +import { describe, expect, it, vi } from 'vitest' +import { UploadStatus } from './Upload.ts' +import { UploadFile } from './UploadFile.ts' + +const isPublicShareMock = vi.hoisted(() => vi.fn()) +vi.mock('@nextcloud/sharing/public', () => ({ isPublicShare: isPublicShareMock })) + +const initChunkWorkspaceMock = vi.hoisted(() => vi.fn()) +const uploadDataMock = vi.hoisted(() => vi.fn()) +vi.mock('../utils/upload.ts', async () => ({ + ...(await vi.importActual('../utils/upload.ts')), + initChunkWorkspace: initChunkWorkspaceMock, + uploadData: uploadDataMock, +})) + +const getMaxChunksSizeMock = vi.hoisted(() => vi.fn()) +const supportsPublicChunkingMock = vi.hoisted(() => vi.fn()) +vi.mock('../utils/config.ts', () => ({ + getMaxChunksSize: getMaxChunksSizeMock, + supportsPublicChunking: supportsPublicChunkingMock, +})) + +describe('chunking', () => { + it('enables chunking for non-public shares', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(true) + }) + + it('enables chunking for public shares', () => { + isPublicShareMock.mockReturnValue(true) + supportsPublicChunkingMock.mockReturnValue(true) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(true) + }) + + it('disables chunking if too small', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1000)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(false) + }) + + 
it('disables chunking if explicitly disabled', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: true }) + expect(uploadFile.isChunked).toBe(false) + }) + + it('disables chunking if disabled by admin', () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(0) + const uploadFile = new UploadFile('/destination', new File([], 'filename'), { noChunking: true }) + expect(uploadFile.isChunked).toBe(false) + }) + + it('disables chunking if not supported by public shares', () => { + isPublicShareMock.mockReturnValue(true) + supportsPublicChunkingMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(false) + }) + + it.each([ + [0, 1], + [1024, 1], + [1025, 2], + [2048, 2], + [2049, 3], + ])('calculates number of chunks correctly for file size %i', async (fileSize, expectedChunks) => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(fileSize)], 'filename'), { noChunking: false }) + expect(uploadFile.isChunked).toBe(expectedChunks > 1) + + const { resolve, promise } = Promise.withResolvers() + const queue = { add: vi.fn(() => resolve()) } + uploadFile.start(queue as never) + + // wait for queue to be called + await promise + expect(uploadFile.numberOfChunks).toBe(expectedChunks) + }) +}) + +describe('upload status and events', () => { + it('initialized', () => { + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(2048)], 'filename'), { noChunking: false }) + expect(uploadFile.status).toBe(UploadStatus.INITIALIZED) + }) + + it('scheduled', async () => { + 
isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + const { resolve, promise } = Promise.withResolvers() + const queue = { add: vi.fn(() => resolve()) } + + uploadFile.start(queue as never) + // wait for queue to be called + await promise + expect(uploadFile.status).toBe(UploadStatus.SCHEDULED) + }) + + it('uploading', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + + const { promise: uploadDataPromise } = Promise.withResolvers() + uploadDataMock.mockImplementationOnce(() => uploadDataPromise) + + const { promise: queuePromise, resolve: queueResolve } = Promise.withResolvers() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + const queue = { add: vi.fn((fn: () => Promise) => (queueResolve(), fn())) } + // start upload and wait for queue to be called + uploadFile.start(queue as never) + await queuePromise + + expect(uploadFile.status).toBe(UploadStatus.UPLOADING) + }) + + it('finished', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.resolve()) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]() + expect(uploadFile.status).toBe(UploadStatus.FINISHED) + expect(onFinish).toHaveBeenCalledOnce() + }) + + it('cancelled by DOM', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.reject(new 
DOMException('Aborted', 'AbortError'))) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]().catch(() => {}) + expect(uploadFile.status).toBe(UploadStatus.CANCELLED) + expect(onFinish).toHaveBeenCalledOnce() + }) + + it('cancelled by axios', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.reject(new CanceledError())) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]().catch(() => {}) + expect(uploadFile.status).toBe(UploadStatus.CANCELLED) + expect(onFinish).toHaveBeenCalledOnce() + }) + + it('failed', async () => { + isPublicShareMock.mockReturnValue(false) + getMaxChunksSizeMock.mockReturnValue(1024) + uploadDataMock.mockImplementationOnce(() => Promise.reject(new Error('generic error'))) + + const onFinish = vi.fn() + const uploadFile = new UploadFile('/destination', new File(['x'.repeat(1024)], 'filename'), { noChunking: false }) + uploadFile.addEventListener('finished', onFinish) + + const queue = { add: vi.fn((_fn: () => Promise) => {}) } + await uploadFile.start(queue as never) + await queue.add.mock.calls[0][0]().catch(() => {}) + expect(uploadFile.status).toBe(UploadStatus.FAILED) + expect(onFinish).toHaveBeenCalledOnce() + }) +}) diff --git a/lib/upload/uploader/UploadFile.ts b/lib/upload/uploader/UploadFile.ts index 2dbe68a8..3984f30f 100644 --- a/lib/upload/uploader/UploadFile.ts +++ 
b/lib/upload/uploader/UploadFile.ts @@ -7,21 +7,15 @@ import type PQueue from 'p-queue' import type { IUpload, TUploadStatus } from './Upload.ts' import axios from '@nextcloud/axios' -import { getCapabilities } from '@nextcloud/capabilities' import { join } from '@nextcloud/paths' import { isPublicShare } from '@nextcloud/sharing/public' import { UploadCancelledError } from '../errors/UploadCancelledError.ts' import { UploadFailedError } from '../errors/UploadFailedError.ts' -import { getMaxChunksSize } from '../utils/config.ts' +import { getMaxChunksSize, supportsPublicChunking } from '../utils/config.ts' import { getMtimeHeader, isRequestAborted } from '../utils/requests.ts' import { getChunk, initChunkWorkspace, uploadData } from '../utils/upload.ts' import { Upload, UploadStatus } from './Upload.ts' -/** - * Shared state to determine if the server supports chunking for public shares. - */ -let supportsPublicChunking: boolean | undefined - /** * A class representing a single file to be uploaded */ @@ -65,16 +59,11 @@ export class UploadFile extends Upload implements IUpload { } get isChunked(): boolean { - if (supportsPublicChunking === undefined) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - supportsPublicChunking = (getCapabilities() as Record).dav?.public_shares_chunking ?? false - } - const maxChunkSize = getMaxChunksSize('size' in this.#fileHandle ? 
this.#fileHandle.size : undefined) return !this.#noChunking && maxChunkSize > 0 - && this.totalBytes >= maxChunkSize - && (supportsPublicChunking || !isPublicShare()) + && this.totalBytes > maxChunkSize + && (!isPublicShare() || supportsPublicChunking()) } async start(queue: PQueue): Promise { @@ -90,6 +79,7 @@ export class UploadFile extends Upload implements IUpload { try { if (this.isChunked) { + this.numberOfChunks = Math.ceil(this.totalBytes / getMaxChunksSize(this.totalBytes)) await this.#uploadChunked(queue) } else { queue.add(() => this.#upload()) @@ -127,7 +117,6 @@ export class UploadFile extends Upload implements IUpload { const temporaryUrl = await initChunkWorkspace(this.source, 5, isPublicShare(), this.#customHeaders) const promises: Promise[] = [] - this.numberOfChunks = Math.ceil(this.totalBytes / getMaxChunksSize(this.totalBytes)) for (let i = 0; i < this.numberOfChunks; i++) { const chunk = await getChunk(this.#file!, i * getMaxChunksSize(this.totalBytes), (i + 1) * getMaxChunksSize(this.totalBytes)) promises.push(queue.add(() => this.#uploadChunk(chunk, join(temporaryUrl, String(i))))) diff --git a/lib/upload/utils/config.ts b/lib/upload/utils/config.ts index 1210f023..cddefe39 100644 --- a/lib/upload/utils/config.ts +++ b/lib/upload/utils/config.ts @@ -14,6 +14,14 @@ export function getMaxParallelUploads(): number { return capabilities.files?.chunked_upload?.max_parallel_count ?? 5 } +/** + * Checks if the server supports chunking for public shares. + */ +export function supportsPublicChunking(): boolean { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return (getCapabilities() as Record).dav?.public_shares_chunking ?? false +} + /** * Get the maximum chunk size for chunked uploads based on the server configuration and file size. 
* From 9e0f249b0dd5d9b52d66ac0d7793c15bbd5b61c0 Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Thu, 19 Feb 2026 14:20:49 +0100 Subject: [PATCH 13/14] feat(upload): allow to track children of an upload This allow to better visualize ongoing uploads in an UI Signed-off-by: Ferdinand Thiessen --- lib/upload/uploader/Upload.ts | 15 +++++++++++++++ lib/upload/uploader/UploadFileTree.ts | 10 ++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/lib/upload/uploader/Upload.ts b/lib/upload/uploader/Upload.ts index 9a27f1bc..7d3ef607 100644 --- a/lib/upload/uploader/Upload.ts +++ b/lib/upload/uploader/Upload.ts @@ -62,6 +62,13 @@ export interface IUpload extends TypedEventTarget { */ readonly signal: AbortSignal + /** + * The children of this upload. + * - For a file upload, this will be an empty array. + * - For a folder upload, this will be the uploads of the children files and folders. + */ + readonly children: IUpload[] + /** * Cancels the upload */ @@ -82,6 +89,14 @@ export abstract class Upload extends TypedEventTarget implements P this.#abortController.abort() } + /** + * Get the children of this upload. + * For a file upload, this will be an empty array, for a folder upload, this will be the uploads of the children files and folders. + */ + public get children(): IUpload[] { + return [] + } + /** * Start the upload * diff --git a/lib/upload/uploader/UploadFileTree.ts b/lib/upload/uploader/UploadFileTree.ts index 8d808a50..1c34c622 100644 --- a/lib/upload/uploader/UploadFileTree.ts +++ b/lib/upload/uploader/UploadFileTree.ts @@ -89,10 +89,15 @@ export class UploadFileTree extends Upload implements IUpload { return false } + get children(): IUpload[] { + return [...this.#children] + } + /** * Set up all child uploads for this upload tree. 
*/ initialize(): (Upload & IUpload)[] { + const grandchildren: (Upload & IUpload)[] = [] for (const child of this.#directory.children) { if (child instanceof FileTree) { const upload = new UploadFileTree( @@ -104,7 +109,8 @@ export class UploadFileTree extends Upload implements IUpload { noChunking: this.#noChunking, }, ) - this.#children.push(upload, ...upload.initialize()) + this.#children.push(upload) + grandchildren.push(...upload.initialize()) } else { const upload = new UploadFile( join(this.source, child.name), @@ -114,7 +120,7 @@ export class UploadFileTree extends Upload implements IUpload { this.#children.push(upload) } } - return this.#children + return [...this.#children, ...grandchildren] } async start(queue: PQueue): Promise { From 2ba1041598a174a1f99edf698b9b6b55730b65ea Mon Sep 17 00:00:00 2001 From: Ferdinand Thiessen Date: Thu, 19 Feb 2026 15:31:00 +0100 Subject: [PATCH 14/14] feat(uploader): emit proper events for changed state Signed-off-by: Ferdinand Thiessen --- lib/upload/uploader/Uploader.ts | 38 ++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/lib/upload/uploader/Uploader.ts b/lib/upload/uploader/Uploader.ts index f30e38aa..e63959fb 100644 --- a/lib/upload/uploader/Uploader.ts +++ b/lib/upload/uploader/Uploader.ts @@ -56,10 +56,31 @@ interface BatchUploadOptions extends UploadOptions { } interface UploaderEventsMap { + /** + * Dispatched when the uploader is paused + */ paused: CustomEvent + /** + * Dispatched when the uploader is resumed + */ resumed: CustomEvent - progress: CustomEvent + /** + * Dispatched when the uploader has finished all uploads (successfully, failed or cancelled) + */ finished: CustomEvent + + /** + * Dispatched when a new upload has been started. + */ + uploadStarted: CustomEvent + /** + * Dispatched when an upload has made progress (e.g. a chunk has been uploaded). 
+ */ + uploadProgress: CustomEvent + /** + * Dispatched when an upload has finished (successfully, failed or cancelled). + */ + uploadFinished: CustomEvent } export class Uploader extends TypedEventTarget { @@ -262,6 +283,7 @@ export class Uploader extends TypedEventTarget { this.#attachEventListeners(upload) } this.#uploadQueue.push(...uploads) + this.dispatchTypedEvent('uploadStarted', new CustomEvent('uploadStarted', { detail: upload })) await upload.start(this.#jobQueue) return uploads } @@ -283,6 +305,7 @@ export class Uploader extends TypedEventTarget { this.#attachEventListeners(upload) this.#uploadQueue.push(upload) + this.dispatchTypedEvent('uploadStarted', new CustomEvent('uploadStarted', { detail: upload })) await upload.start(this.#jobQueue) return upload } @@ -290,12 +313,14 @@ export class Uploader extends TypedEventTarget { /** * Handle the progress event of an upload. * Update the ETA and dispatch a progress event for the uploader. + * + * @param event - The progress event of an upload */ - #onProgress() { + #onProgress(event: CustomEvent) { const totalBytes = this.#uploadQueue.reduce((acc, upload) => acc + upload.totalBytes, 0) const uploadedBytes = this.#uploadQueue.reduce((acc, upload) => acc + upload.uploadedBytes, 0) this.#eta.update(uploadedBytes, totalBytes) - this.dispatchTypedEvent('progress', new CustomEvent('progress')) + this.dispatchTypedEvent('uploadProgress', new CustomEvent('uploadProgress', { detail: event.detail })) } /** @@ -303,9 +328,12 @@ export class Uploader extends TypedEventTarget { * * 1. Update the progress * 2. if all uploads are finished dispatch a finished event for the uploader and clear the queue + * + * @param event - The finished event of an upload */ - async #onFinished() { - this.#onProgress() + async #onFinished(event: CustomEvent) { + this.#onProgress(event) + this.dispatchTypedEvent('uploadFinished', new CustomEvent('uploadFinished', { detail: event.detail })) const finalStates = [ UploadStatus.FINISHED,