diff --git a/packages/test/snapshots/.mocharc.cjs b/packages/test/snapshots/.mocharc.cjs index ce3d13cabcdd..6a9841e4afce 100644 --- a/packages/test/snapshots/.mocharc.cjs +++ b/packages/test/snapshots/.mocharc.cjs @@ -8,7 +8,9 @@ const getFluidTestMochaConfig = require("@fluid-internal/mocha-test-setup/mocharc-common"); const config = getFluidTestMochaConfig(__dirname); -config.ignore = config.spec + "/generate/**/*"; +// These tests need to be run with multiple different test file filters for different "test" cases. +// It's simplest to just let the individual scripts specify what they need and disable the default. +delete config.spec; // TODO: figure out why this package needs the --exit flag, tests might not be cleaning up correctly after themselves // AB#7856 config.exit = true; diff --git a/packages/test/snapshots/package.json b/packages/test/snapshots/package.json index bb573701e463..f159ebf67f9b 100644 --- a/packages/test/snapshots/package.json +++ b/packages/test/snapshots/package.json @@ -33,11 +33,11 @@ "test": "npm run test:mocha", "test:coverage": "c8 npm test", "test:mocha": "npm run test:mocha:esm && echo skipping cjs to avoid overhead - npm run test:mocha:cjs", - "test:mocha:cjs": "cross-env FLUID_TEST_MODULE_SYSTEM=CJS mocha", - "test:mocha:esm": "mocha", + "test:mocha:cjs": "cross-env FLUID_TEST_MODULE_SYSTEM=CJS mocha --ignore \"dist/test/generate/**/*\" \"dist/test/**/*.spec.*js\"", + "test:mocha:esm": "mocha --ignore \"lib/test/generate/**/*\" \"lib/test/**/*.spec.*js\"", "test:mocha:verbose": "cross-env FLUID_TEST_VERBOSE=1 npm run test:mocha", - "test:new": "mocha --experimental-worker \"lib/test/generate/new.spec.*js\"", - "test:update": "mocha --experimental-worker \"lib/test/generate/update.spec.*js\"", + "test:new": "mocha \"lib/test/generate/new.spec.*js\"", + "test:update": "mocha \"lib/test/generate/update.spec.*js\"", "tsc": "fluid-tsc commonjs --project ./tsconfig.cjs.json && copyfiles -f 
../../../common/build/build-common/src/cjs/package.json ./dist" }, "c8": { diff --git a/packages/test/snapshots/src/replayMultipleFiles.ts b/packages/test/snapshots/src/replayMultipleFiles.ts index d5882111598f..c2088b0e9de7 100644 --- a/packages/test/snapshots/src/replayMultipleFiles.ts +++ b/packages/test/snapshots/src/replayMultipleFiles.ts @@ -50,7 +50,17 @@ export enum Mode { UpdateSnapshots, // Update the current snapshot files. } +/** + * Arguments capturing a unit of snapshot work. + * + * @remarks + * When running in a worker thread, this interface is used as the type for + * `workerData` and must be passable as such. + */ export interface IWorkerArgs { + /** Name of the snapshot test */ + name: string; + /** Snapshot folder */ folder: string; mode: Mode; snapFreq?: number; @@ -58,13 +68,34 @@ export interface IWorkerArgs { initializeFromSnapshotsDir?: string; } +/** + * Limits the number of concurrent promises being processed. + * + * @remarks + * Up to `limit` number of promises can be processed concurrently. Once limit + * is reached, if more work is added, the `addWork` call will wait until one + * of the existing promises completes. + * + * Currently all workers are handled independently, so if one of the workers + * fails, the other workers will continue to be scheduled and processed. + */ class ConcurrencyLimiter { private readonly promises: Promise<void>[] = []; private deferred: Deferred<void> | undefined; constructor(private limit: number) {} - async addWork(worker: () => Promise<void>) { + /** + * Adds work to be processed. + * + * @remarks + * Callers must await the returned promise before calling `addWork` another time. + * + * Work is guaranteed to have started when the returned promise resolves. + * + * `waitAll` must be awaited on to ensure all work is complete. 
+ */ + async addWork(worker: () => Promise<void>): Promise<void> { this.limit--; if (this.limit < 0) { assert(this.deferred === undefined); @@ -74,10 +105,12 @@ class ConcurrencyLimiter { assert(this.limit >= 0); } - const p = worker().then(() => { + const p = worker().finally(() => { this.limit++; if (this.deferred) { assert(this.limit === 0); + // This will allow other processing to proceed even on error. + // To end early, check for error and reject deferred. this.deferred.resolve(); this.deferred = undefined; } @@ -137,7 +170,12 @@ export async function processOneNode(args: IWorkerArgs) { // replayArgs.overlappingContainers = 1; try { + const start = performance.now(); const errors = await new ReplayTool(replayArgs).Go(); + const end = performance.now(); + console.log( + `${args.name} processed with ${errors.length} errors in ${((end - start) / 1000).toFixed(2)} seconds`, + ); if (errors.length !== 0) { throw new Error(`Errors\n ${errors.join("\n")}`); } @@ -147,6 +185,17 @@ export async function processOneNode(args: IWorkerArgs) { } } +/** + * Processes the content of snapshot folders. + * + * @param mode - test mode + * @param concurrently - when true (default), process multiple snapshot folders + * concurrently. Full processing is time intensive but folders are independent, + * so concurrency helps speed up overall test time. Ideally individual folders + * would be their own test case, but mocha does not have direct support for running + * any in parallel that aren't broken up into separate files. With this + * concurrency implementation, progress is shown by logging each case as it completes. 
+ */ export async function processContent(mode: Mode, concurrently = true) { const limiter = new ConcurrencyLimiter(numberOfThreads); @@ -185,12 +234,13 @@ export async function processContent(mode: Mode, concurrently = true) { testSummaries = true; } - const data: IWorkerArgs = { + const data = { + name: node.name, folder, mode, snapFreq, testSummaries, - }; + } as const satisfies IWorkerArgs; switch (mode) { case Mode.Validate: @@ -218,7 +268,7 @@ * from multiple old versions, process snapshot from each of these versions. */ async function processNodeForValidate( - data: IWorkerArgs, + data: Readonly<IWorkerArgs>, concurrently: boolean, limiter: ConcurrencyLimiter, ) { @@ -235,8 +285,12 @@ async function processNodeForValidate( continue; } - data.initializeFromSnapshotsDir = `${srcSnapshotsDir}/${node.name}`; - await processNode(data, concurrently, limiter); + const subData = { + ...data, + name: `${data.name}-${node.name}`, + initializeFromSnapshotsDir: `${srcSnapshotsDir}/${node.name}`, + }; + await processNode(subData, concurrently, limiter); } } @@ -248,7 +302,7 @@ async function processNodeForValidate( * - Update the package version of the current snapshots. */ async function processNodeForUpdatingSnapshots( - data: IWorkerArgs, + data: Readonly<IWorkerArgs>, concurrently: boolean, limiter: ConcurrencyLimiter, ) { @@ -297,7 +351,7 @@ async function processNodeForUpdatingSnapshots( * generate snapshot files and write them to the current snapshots dir. */ async function processNodeForNewSnapshots( - data: IWorkerArgs, + data: Readonly<IWorkerArgs>, concurrently: boolean, limiter: ConcurrencyLimiter, ) { @@ -311,10 +365,10 @@ async function processNodeForNewSnapshots( fs.mkdirSync(currentSnapshotsDir, { recursive: true }); // For new snapshots, testSummaries should be set because summaries should be generated as per the original file. 
- data.testSummaries = true; + const dataSummaries = { ...data, testSummaries: true }; // Process the current folder which will write the generated snapshots to current snapshots dir. - await processNode(data, concurrently, limiter); + await processNode(dataSummaries, concurrently, limiter); const versionFileName = `${currentSnapshotsDir}/snapshotVersion.json`; // Write the versions file to the current snapshots dir. @@ -323,7 +377,7 @@ }); // Write the metadata file. - writeMetadataFile(data.folder); + writeMetadataFile(dataSummaries.folder); } /** @@ -335,7 +389,7 @@ async function processNodeForNewSnapshots( * 3. Validates that the snapshot matches with the corresponding snapshot in current version. * 4. Loads a document with snapshot in current version. Repeats steps 2 and 3. */ -async function processNodeForBackCompat(data: IWorkerArgs) { +async function processNodeForBackCompat(data: Readonly<IWorkerArgs>) { const messagesFile = `${data.folder}/messages.json`; if (!fs.existsSync(messagesFile)) { throw new Error(`messages.json doesn't exist in ${data.folder}`); } @@ -381,10 +435,10 @@ async function processNodeForBackCompat(data: Readonly<IWorkerArgs>) { * the threads. If concurrently if false, directly processes the snapshots. */ async function processNode( - workerData: IWorkerArgs, + workerData: Readonly<IWorkerArgs>, concurrently: boolean, limiter: ConcurrencyLimiter, -) { +): Promise<void> { // "worker_threads" does not resolve without --experimental-worker flag on command line let threads: typeof import("worker_threads"); try {