Skip to content

Commit 4558f52

Browse files
committed
fix: unreachable UNC paths in the vfs can cause "not found" error on the frontend #1115
1 parent 2fa2e90 commit 4558f52

File tree

3 files changed

+62
-40
lines changed

3 files changed

+62
-40
lines changed

src/stat.ts

Lines changed: 33 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -1,42 +1,39 @@
1-
import { defineConfig } from './config'
1+
import { Worker } from 'node:worker_threads'
22
import { Stats } from 'node:fs'
3-
import { haveTimeout, pendingPromise } from './cross'
4-
import { stat } from 'fs/promises'
3+
import { pendingPromise } from './cross'
54

6-
const fileTimeout = defineConfig('file_timeout', 3, x => x * 1000)
5+
// all stat requests for the same worker are serialized, potentially introducing extra latency
76

8-
// since nodejs' UV_THREADPOOL_SIZE is limited, avoid using multiple slots for the same UNC host, and always leave one free for local operations
9-
const poolSize = Number(process.env.UV_THREADPOOL_SIZE || 4)
10-
const previous = new Map<string, Promise<Stats>>() // wrapped promises with haveTimeout
11-
const working = new Set<Promise<Stats>>() // plain stat's promise
12-
export async function statWithTimeout(path: string) {
13-
const uncHost = /^\\\\([^\\]+)\\/.exec(path)?.[1]
14-
if (!uncHost)
15-
return haveTimeout(fileTimeout.compiled(), stat(path))
16-
const busy = process.env.HFS_PARALLEL_UNC ? null : previous.get(uncHost) // by default we serialize requests on the same UNC host, to keep threadpool usage low
17-
const ret = pendingPromise<Stats>()
18-
previous.set(uncHost, ret) // reserve the slot before starting the operation
19-
const err = await busy?.then(() => false, e => e.message === 'timeout' && e) // only timeout error is shared with pending requests
20-
if (err) {
21-
if (previous.get(uncHost) === ret) // but we don't want to block forever, only involve those that were already waiting
22-
previous.delete(uncHost)
23-
ret.reject(err)
7+
const pool = new Map<string, (path: string) => Promise<Stats>>()
8+
9+
export function getStatWorker(key: string) {
10+
const existing = pool.get(key)
11+
if (existing)
12+
return existing
13+
const worker = new Worker(__dirname + '/statWorker.js')
14+
worker.unref()
15+
const requests = new Map()
16+
worker.on('message', (msg: any) => {
17+
requests.get(msg.path)?.resolve(msg.error
18+
? Promise.reject(new Error(msg.error))
19+
: Object.setPrototypeOf(msg.result, Stats.prototype)
20+
)
21+
requests.delete(msg.path)
22+
})
23+
worker.on('error', (err) => {
24+
for (const p of requests.values())
25+
p.reject(err)
26+
requests.clear()
27+
worker.terminate().catch(() => {})
28+
pool.delete(key)
29+
})
30+
pool.set(key, query)
31+
return query
32+
33+
function query(path: string) {
34+
const ret = pendingPromise<Stats>()
35+
requests.set(path, ret)
36+
worker.postMessage(path)
2437
return ret
2538
}
26-
while (working.size >= poolSize - 1) // always leave one slot free for local operations
27-
await Promise.race(working.values()).catch(() => {}) // we are assuming UV_THREADPOOL_SIZE > 1, otherwise race() will deadlock
28-
const op = stat(path)
29-
working.add(op)
30-
try {
31-
ret.resolve(await haveTimeout(fileTimeout.compiled(),
32-
op.finally(() => working.delete(op)) ))
33-
}
34-
catch (e) {
35-
ret.reject(e)
36-
}
37-
finally {
38-
if (previous.get(uncHost) === ret)
39-
previous.delete(uncHost)
40-
}
41-
return ret
4239
}

src/statWorker.js

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
const { parentPort } = require('node:worker_threads')
const { statSync } = require('node:fs')

// Worker entry point: for every incoming path, reply with either a plain-object copy of its
// Stats (methods don't survive postMessage's structured clone — the main thread restores the
// prototype) or the error message. Handler is deliberately synchronous: there is no await
// here, and an async callback's returned promise would be ignored anyway.
parentPort.on('message', path => {
    try { // we use statSync to not use (and not risk to saturate) libuv's thread pool
        parentPort.postMessage({ path, result: { ...statSync(path) } })
    } catch (err) {
        parentPort.postMessage({ path, error: err?.message || String(err) })
    }
})

src/util-files.ts

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,33 @@
11
// This file is part of HFS - Copyright 2021-2023, Massimo Melina <[email protected]> - License https://www.gnu.org/licenses/gpl-3.0.txt
22

3-
import { access, mkdir, readFile } from 'fs/promises'
4-
import { Promisable, try_, wait, isWindowsDrive } from './cross'
3+
import { access, mkdir, readFile, stat } from 'fs/promises'
4+
import { Promisable, try_, wait, isWindowsDrive, haveTimeout } from './cross'
5+
import { defineConfig } from './config'
56
import { createWriteStream, mkdirSync, watch, ftruncate } from 'fs'
67
import { basename, dirname } from 'path'
78
import glob from 'fast-glob'
89
import { IS_WINDOWS } from './const'
910
import { once } from 'events'
1011
import { Readable } from 'stream'
11-
import { statWithTimeout } from './stat'
12+
import { getStatWorker } from './stat'
1213
// @ts-ignore
1314
import unzipper from 'unzip-stream'
1415

15-
export { statWithTimeout }
16+
const fileTimeout = defineConfig('file_timeout', 3, x => x * 1000)
17+
// a smart (and a bit arbitrary) way to decide if we need the stat-workers functionality. Without it, we may be a bit faster. We'll see with experience if we need a dedicated configuration.
18+
const disableStatWorkers = Number(process.env.UV_THREADPOOL_SIZE) >= 10
19+
20+
// some paths (mostly offline networked ones) can take 20+ seconds to fail
21+
export async function statWithTimeout(path: string) {
22+
// with just 4 requests we saturate the default UV_THREADPOOL_SIZE, blocking even local fs operations, so we move all UNC requests to worker threads
23+
const workerKey = !disableStatWorkers && IS_WINDOWS && getUncHost(path) // pool requests by server name
24+
const op = workerKey ? getStatWorker(workerKey)(path) : stat(path)
25+
return haveTimeout(fileTimeout.compiled(), op)
26+
}
27+
28+
function getUncHost(path: string) {
29+
return /^\\\\([^\\]+)\\/.exec(path)?.[1]
30+
}
1631

1732
export async function isDirectory(path: string) {
1833
try { return (await statWithTimeout(path)).isDirectory() }

0 commit comments

Comments
 (0)