diff --git a/.buildkite/react-native-pipeline.benchmark.block.yml b/.buildkite/react-native-pipeline.benchmark.block.yml
new file mode 100644
index 000000000..79ab4dd32
--- /dev/null
+++ b/.buildkite/react-native-pipeline.benchmark.block.yml
@@ -0,0 +1,10 @@
+steps:
+  - block: "Trigger react native benchmark pipeline"
+    key: "trigger-react-native-benchmark-pipeline"
+
+  - label: ":pipeline_upload: React native benchmark pipeline"
+    depends_on: "trigger-react-native-benchmark-pipeline"
+    agents:
+      queue: macos
+    timeout_in_minutes: 2
+    command: buildkite-agent pipeline upload .buildkite/react-native-pipeline.benchmark.yml
\ No newline at end of file
diff --git a/.buildkite/react-native-pipeline.benchmark.yml b/.buildkite/react-native-pipeline.benchmark.yml
new file mode 100644
index 000000000..91251a327
--- /dev/null
+++ b/.buildkite/react-native-pipeline.benchmark.yml
@@ -0,0 +1,153 @@
+agents:
+  queue: "opensource"
+
+steps:
+  - group: "React Native Benchmark Tests"
+    steps:
+      #
+      # Build fixtures
+      #
+      - label: ':android: Build RN {{matrix}} benchmark fixture APK (New Arch)'
+        key: "build-react-native-android-fixture-benchmark-new-arch"
+        timeout_in_minutes: 30
+        agents:
+          queue: macos-15
+        env:
+          JAVA_VERSION: "17"
+          NODE_VERSION: "18"
+          RN_VERSION: "{{matrix}}"
+          NOTIFIER_VERSION: "8.0.0"
+          RCT_NEW_ARCH_ENABLED: "1"
+          BUILD_ANDROID: "true"
+          NATIVE_INTEGRATION: "1"
+        artifact_paths:
+          - "test/react-native/features/fixtures/generated/native-integration/new-arch/**/reactnative.apk"
+        commands:
+          - bundle install
+          - node test/react-native/scripts/generate-react-native-fixture.js
+        matrix:
+          - "0.80"
+        retry:
+          automatic:
+            - exit_status: "*"
+              limit: 1
+
+      - label: ':mac: Build RN {{matrix}} benchmark fixture ipa (New Arch)'
+        key: "build-react-native-ios-fixture-benchmark-new-arch"
+        timeout_in_minutes: 30
+        agents:
+          queue: "macos-15"
+        env:
+          NODE_VERSION: "18"
+          RN_VERSION: "{{matrix}}"
+          RCT_NEW_ARCH_ENABLED: "1"
+          NOTIFIER_VERSION: "8.0.0"
+          BUILD_IOS: "true"
+          XCODE_VERSION: "16.2.0"
+          NATIVE_INTEGRATION: "1"
+        artifact_paths:
+          - "test/react-native/features/fixtures/generated/native-integration/new-arch/**/output/reactnative.ipa"
+        commands:
+          - bundle install
+          - node test/react-native/scripts/generate-react-native-fixture.js
+        matrix:
+          - "0.80"
+        retry:
+          automatic:
+            - exit_status: "*"
+              limit: 1
+
+      #
+      # Run benchmarks
+      #
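+      # Setting BENCHMARKS=1 gates the @benchmark-tagged scenarios (see the
+      # Before('@benchmark') hook in test/react-native/features/support/env.rb)
+      # and extends maze-runner's receive_requests_wait to 180s.
+      #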
+      - label: ":bitbar: :android: RN {{matrix}} Android Benchmarks"
+        depends_on: "build-react-native-android-fixture-benchmark-new-arch"
+        timeout_in_minutes: 20
+        plugins:
+          artifacts#v1.9.0:
+            download: "test/react-native/features/fixtures/generated/native-integration/new-arch/{{matrix}}/reactnative.apk"
+            upload:
+              - "./test/react-native/maze_output/failed/*"
+              - "./test/react-native/maze_output/maze_output.zip"
+              - "./test/react-native/maze_output/metrics.csv"
+          docker-compose#v4.7.0:
+            pull: react-native-maze-runner
+            run: react-native-maze-runner
+            service-ports: true
+            command:
+              - --app=/app/features/fixtures/generated/native-integration/new-arch/{{matrix}}/reactnative.apk
+              - --farm=bb
+              - --device=ANDROID_13
+              - --a11y-locator
+              - --fail-fast
+              - --appium-version=1.22
+              - --no-tunnel
+              - --aws-public-ip
+              - --tags=@benchmark
+          test-collector#v1.10.2:
+            files: "reports/TEST-*.xml"
+            format: "junit"
+            branch: "^main|next$$"
+            api-token-env-name: "REACT_NATIVE_PERFORMANCE_BUILDKITE_ANALYTICS_TOKEN"
+        env:
+          RCT_NEW_ARCH_ENABLED: "1"
+          NATIVE_INTEGRATION: "1"
+          BENCHMARKS: "1"
+          RN_VERSION: "{{matrix}}"
+        retry:
+          manual:
+            permit_on_passed: true
+          automatic:
+            - exit_status: 103 # Appium session failed
+              limit: 2
+        concurrency: 25
+        concurrency_group: "bitbar"
+        concurrency_method: eager
+        matrix:
+          - "0.80"
+
+      - label: ":bitbar: :mac: RN {{matrix}} iOS Benchmarks"
+        depends_on: "build-react-native-ios-fixture-benchmark-new-arch"
+        timeout_in_minutes: 20
+        plugins:
+          artifacts#v1.9.0:
+            download: "test/react-native/features/fixtures/generated/native-integration/new-arch/{{matrix}}/output/reactnative.ipa"
+            upload:
+              - "./test/react-native/maze_output/failed/*"
+              - "./test/react-native/maze_output/maze_output.zip"
+              - "./test/react-native/maze_output/metrics.csv"
+          docker-compose#v4.12.0:
+            pull: react-native-maze-runner
+            run: react-native-maze-runner
+            service-ports: true
+            command:
+              - --app=/app/features/fixtures/generated/native-integration/new-arch/{{matrix}}/output/reactnative.ipa
+              - --farm=bb
+              - --device=IOS_14|IOS_15|IOS_16|IOS_17|IOS_18|IOS_26
+              - --a11y-locator
+              - --fail-fast
+              - --appium-version=1.22
+              - --no-tunnel
+              - --aws-public-ip
+              - --tags=@benchmark
+          test-collector#v1.10.2:
+            files: "reports/TEST-*.xml"
+            format: "junit"
+            branch: "^main|next$$"
+            api-token-env-name: "REACT_NATIVE_PERFORMANCE_BUILDKITE_ANALYTICS_TOKEN"
+        env:
+          RCT_NEW_ARCH_ENABLED: "1"
+          NATIVE_INTEGRATION: "1"
+          BENCHMARKS: "1"
+          RN_VERSION: "{{matrix}}"
+        retry:
+          manual:
+            permit_on_passed: true
+          automatic:
+            - exit_status: 103 # Appium session failed
+              limit: 2
+        concurrency: 25
+        concurrency_group: "bitbar"
+        concurrency_method: eager
+        matrix:
+          - "0.80"
diff --git a/.buildkite/scripts/packages.json b/.buildkite/scripts/packages.json
index c3d041784..3ac9e77a0 100644
--- a/.buildkite/scripts/packages.json
+++ b/.buildkite/scripts/packages.json
@@ -77,5 +77,9 @@
     "pipeline": ".buildkite/expo-pipeline.full.yml",
     "block": ".buildkite/expo-pipeline.full.block.yml",
     "paths": []
+  },
+  {
+    "block": ".buildkite/react-native-pipeline.benchmark.block.yml",
+    "paths": []
   }
 ]
diff --git a/.buildkite/scripts/pipeline-trigger.js b/.buildkite/scripts/pipeline-trigger.js
index 9fb6a4bc6..804041ebc 100644
--- a/.buildkite/scripts/pipeline-trigger.js
+++ b/.buildkite/scripts/pipeline-trigger.js
@@ -28,7 +28,7 @@ packages.reverse().forEach(({ paths, block, pipeline, environment, skip }) => {
   }
 
   // Upload all pipelines if specified in the commit message
-  if (commitMessage.includes("[full ci]") ||
+  if (pipeline && (commitMessage.includes("[full ci]") ||
     isFullBuild ||
     currentBranch === "main" ||
-    baseBranch === "main") {
+    baseBranch === "main")) {
diff --git a/docker-compose.yml b/docker-compose.yml
index 2deedfdc2..3ceaf8763 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -82,6 +82,7 @@ services:
       RN_VERSION:
       EXPO_VERSION:
       REACT_NATIVE_NAVIGATION:
+      BENCHMARKS:
       MAZE_REPEATER_API_KEY: "${MAZE_REPEATER_API_KEY_RN:-}"
       MAZE_HUB_REPEATER_API_KEY: "${MAZE_HUB_REPEATER_API_KEY_RN:-}"
     ports:
diff --git a/test/react-native/features/benchmarks/cross_layer_spans.feature b/test/react-native/features/benchmarks/cross_layer_spans.feature
new file mode 100644
index 000000000..e4d1d8c1f
--- /dev/null
+++ b/test/react-native/features/benchmarks/cross_layer_spans.feature
@@ -0,0 +1,19 @@
+@benchmark @native_integration @ios_only
+Feature: Cross-Layer Spans
+
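+  # Each space-separated token in <options> becomes a config flag for the
+  # benchmark run: "native" starts the performance SDK from the native layer,
+  # "nativeSpans"/"jsSpans" enable the cross-layer span plugins, and
+  # "rendering"/"cpu"/"memory" enable the corresponding metrics.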
nativeSpans rendering" | + | "native nativeSpans cpu" | + | "native nativeSpans memory" | + | "native nativeSpans rendering cpu memory" | + | "native nativeSpans rendering cpu memory jsSpans" | diff --git a/test/react-native/features/steps/react-native-steps.rb b/test/react-native/features/steps/react-native-steps.rb index b61e43785..7d4f9ca6c 100644 --- a/test/react-native/features/steps/react-native-steps.rb +++ b/test/react-native/features/steps/react-native-steps.rb @@ -1,5 +1,12 @@ When('I run {string}') do |scenario_name| - execute_command 'run-scenario', scenario_name + run_scenario scenario_name +end + +When('I run benchmark {string} configured as {string}') do |benchmark_name, config| + execute_command 'run-benchmark', { + benchmark_name: benchmark_name, + config: config + } end When('I execute the command {string}') do |command| @@ -113,7 +120,14 @@ ) end -def execute_command(action, scenario_name = '') +def run_scenario(scenario_name = '') + execute_command 'run-scenario', { + scenario_name: scenario_name, + payload: scenario_name + } +end + +def execute_command(action, command_hash = nil) address = if Maze.config.farm == :bb if Maze.config.aws_public_ip Maze.public_address @@ -122,21 +136,23 @@ def execute_command(action, scenario_name = '') end else case Maze::Helper.get_current_platform - when 'android' - 'localhost:9339' - else - 'bs-local.com:9339' + when 'android' + 'localhost:9339' + else + 'bs-local.com:9339' end end command = { action: action, - scenario_name: scenario_name, - payload: scenario_name, endpoint: "http://#{address}/traces", api_key: $api_key, } + unless command_hash.nil? + command.merge! command_hash + end + $logger.debug("Queuing command: #{command}") Maze::Server.commands.add command end diff --git a/test/react-native/features/support/env.rb b/test/react-native/features/support/env.rb index cc8bab0cd..43e824d88 100644 --- a/test/react-native/features/support/env.rb +++ b/test/react-native/features/support/env.rb @@ -8,6 +8,10 @@ Maze.config.receive_requests_wait = 60 end + if ENV["BENCHMARKS"] + Maze.config.receive_requests_wait = 180 + end + end Before('@skip') do @@ -59,4 +63,8 @@ current_version = ENV['RN_VERSION'].nil? ? 
+When('I run benchmark {string} configured as {string}') do |benchmark_name, config|
+  execute_command 'run-benchmark', {
+    benchmark_name: benchmark_name,
+    config: config
+  }
 end
 
 When('I execute the command {string}') do |command|
@@ -113,7 +120,14 @@
   )
 end
 
-def execute_command(action, scenario_name = '')
+def run_scenario(scenario_name = '')
+  execute_command 'run-scenario', {
+    scenario_name: scenario_name,
+    payload: scenario_name
+  }
+end
+
+def execute_command(action, command_hash = nil)
   address = if Maze.config.farm == :bb
               if Maze.config.aws_public_ip
                 Maze.public_address
@@ -122,21 +136,23 @@ def execute_command(action, scenario_name = '')
               end
             else
               case Maze::Helper.get_current_platform
-              when 'android'
-                'localhost:9339'
-              else
-                'bs-local.com:9339'
+                when 'android'
+                  'localhost:9339'
+                else
+                  'bs-local.com:9339'
               end
             end
 
   command = {
     action: action,
-    scenario_name: scenario_name,
-    payload: scenario_name,
     endpoint: "http://#{address}/traces",
     api_key: $api_key,
   }
 
+  unless command_hash.nil?
+    command.merge! command_hash
+  end
+
   $logger.debug("Queuing command: #{command}")
   Maze::Server.commands.add command
 end
diff --git a/test/react-native/features/support/env.rb b/test/react-native/features/support/env.rb
index cc8bab0cd..43e824d88 100644
--- a/test/react-native/features/support/env.rb
+++ b/test/react-native/features/support/env.rb
@@ -8,6 +8,10 @@
     Maze.config.receive_requests_wait = 60
   end
 
+  if ENV["BENCHMARKS"]
+    Maze.config.receive_requests_wait = 180
+  end
+
 end
 
 Before('@skip') do
@@ -59,4 +63,8 @@
   current_version = ENV['RN_VERSION'].nil? ? 0 : ENV['RN_VERSION'].to_f
   skip_this_scenario("Skipping scenario: Not running native integration fixture") unless ENV["NATIVE_INTEGRATION"]
   skip_this_scenario("Skipping scenario: Not supported in 0.72") if Maze::Helper.get_current_platform == 'ios' && current_version == 0.72
+end
+
+Before('@benchmark') do |scenario|
+  skip_this_scenario("Skipping scenario: Not running benchmark tests") unless ENV["BENCHMARKS"]
 end
\ No newline at end of file
diff --git a/test/react-native/native-test-utils/android/build.gradle b/test/react-native/native-test-utils/android/build.gradle
index c62990070..59ef49863 100644
--- a/test/react-native/native-test-utils/android/build.gradle
+++ b/test/react-native/native-test-utils/android/build.gradle
@@ -13,7 +13,8 @@ android {
 }
 
 dependencies {
-    compileOnly("com.bugsnag:bugsnag-android-performance:1.16.0")
+    compileOnly("com.bugsnag:bugsnag-android-performance:2.0.0")
+    compileOnly("com.bugsnag:bugsnag-android-performance-impl:2.0.0")
     implementation project(':bugsnag_react-native-performance')
     implementation project(':bugsnag_plugin-react-native-span-access')
 }
\ No newline at end of file
diff --git a/test/react-native/native-test-utils/android/src/main/java/com/bugsnag/test/utils/BugsnagTestUtils.java b/test/react-native/native-test-utils/android/src/main/java/com/bugsnag/test/utils/BugsnagTestUtils.java
index 57aa4357d..bb06b488a 100644
--- a/test/react-native/native-test-utils/android/src/main/java/com/bugsnag/test/utils/BugsnagTestUtils.java
+++ b/test/react-native/native-test-utils/android/src/main/java/com/bugsnag/test/utils/BugsnagTestUtils.java
@@ -11,6 +11,7 @@
 import com.bugsnag.android.performance.AutoInstrument;
 import com.bugsnag.android.performance.BugsnagPerformance;
+import com.bugsnag.android.performance.EnabledMetrics;
 import com.bugsnag.android.performance.PerformanceConfiguration;
 
 import com.bugsnag.reactnative.performance.nativespans.BugsnagJavascriptSpansPlugin;
@@ -124,9 +125,31 @@ public static boolean startNativePerformance(Context context, Map<String, Object> configuration) {
-        config.addPlugin(new BugsnagNativeSpansPlugin());
-        config.addPlugin(new BugsnagJavascriptSpansPlugin());
-        config.addPlugin(new BugsnagReactNativeAppStartPlugin());
+        if (configuration.containsKey("enabledMetrics")) {
+            Map<String, Object> metricsConfig = (Map<String, Object>) configuration.get("enabledMetrics");
+            EnabledMetrics enabledMetrics = new EnabledMetrics(
+                Boolean.TRUE.equals(metricsConfig.get("rendering")),
+                Boolean.TRUE.equals(metricsConfig.get("cpu")),
+                Boolean.TRUE.equals(metricsConfig.get("memory"))
+            );
+
+            config.setEnabledMetrics(enabledMetrics);
+        }
+
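+        // Plugins default to on: they are only skipped when the configuration
+        // explicitly passes false for the corresponding flag.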
+        if (!configuration.containsKey("nativeSpans") || Boolean.TRUE.equals(configuration.get("nativeSpans"))) {
+            config.addPlugin(new BugsnagNativeSpansPlugin());
+        }
+        if (!configuration.containsKey("jsSpans") || Boolean.TRUE.equals(configuration.get("jsSpans"))) {
+            config.addPlugin(new BugsnagJavascriptSpansPlugin());
+        }
+        if (!configuration.containsKey("nativeAppStarts") || Boolean.TRUE.equals(configuration.get("nativeAppStarts"))) {
+            config.addPlugin(new BugsnagReactNativeAppStartPlugin());
+        }
 
         BugsnagPerformance.start(config);
         Log.d(TAG, "Native performance started successfully");
diff --git a/test/react-native/native-test-utils/ios/BugsnagTestUtils.mm b/test/react-native/native-test-utils/ios/BugsnagTestUtils.mm
index ee1851450..a766294b9 100644
--- a/test/react-native/native-test-utils/ios/BugsnagTestUtils.mm
+++ b/test/react-native/native-test-utils/ios/BugsnagTestUtils.mm
@@ -81,15 +81,33 @@ + (BOOL)startNativePerformanceWithConfiguration:(NSDictionary *)configuration {
     config.autoInstrumentAppStarts = autoInstrumentAppStarts;
     config.autoInstrumentViewControllers = autoInstrumentViewLoads;
     config.autoInstrumentNetworkRequests = NO;
-    config.enabledMetrics.cpu = YES;
-    config.enabledMetrics.memory = YES;
-    config.enabledMetrics.rendering = YES;
     config.internal.autoTriggerExportOnBatchSize = 1;
     config.internal.clearPersistenceOnStart = YES;
 
-    [config addPlugin:[BugsnagNativeSpansPlugin new]];
-    [config addPlugin:[BugsnagJavascriptSpansPlugin new]];
-    [config addPlugin:[BugsnagReactNativeAppStartPlugin new]];
+    if (configuration[@"samplingProbability"]) {
+        config.samplingProbability = configuration[@"samplingProbability"];
+    }
+
+    if (configuration[@"enabledMetrics"]) {
+        NSDictionary *metricsConfig = configuration[@"enabledMetrics"];
+        config.enabledMetrics.rendering = [metricsConfig[@"rendering"] boolValue];
+        config.enabledMetrics.cpu = [metricsConfig[@"cpu"] boolValue];
+        config.enabledMetrics.memory = [metricsConfig[@"memory"] boolValue];
+    } else {
+        config.enabledMetrics.cpu = YES;
+        config.enabledMetrics.memory = YES;
+        config.enabledMetrics.rendering = YES;
+    }
+
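+    // Mirrors the Android fixture: plugins are on by default and only
+    // disabled when the configuration explicitly passes false.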
+    if (!configuration[@"nativeSpans"] || [configuration[@"nativeSpans"] boolValue]) {
+        [config addPlugin:[BugsnagNativeSpansPlugin new]];
+    }
+    if (!configuration[@"jsSpans"] || [configuration[@"jsSpans"] boolValue]) {
+        [config addPlugin:[BugsnagJavascriptSpansPlugin new]];
+    }
+    if (!configuration[@"nativeAppStarts"] || [configuration[@"nativeAppStarts"] boolValue]) {
+        [config addPlugin:[BugsnagReactNativeAppStartPlugin new]];
+    }
 
     [BugsnagPerformance startWithConfiguration:config];
diff --git a/test/react-native/scenario-launcher/benchmarks/BenchmarkRunner.js b/test/react-native/scenario-launcher/benchmarks/BenchmarkRunner.js
new file mode 100644
index 000000000..67d488758
--- /dev/null
+++ b/test/react-native/scenario-launcher/benchmarks/BenchmarkRunner.js
@@ -0,0 +1,192 @@
+const createRunResult = (
+  timeTaken,
+  excludedTime,
+  iterations,
+) => {
+  const measuredTime = timeTaken - excludedTime
+  return ({
+    timeTaken,
+    excludedTime,
+    iterations,
+    measuredTime,
+    averageTimePerIteration: iterations > 0 ? measuredTime / iterations : 0
+  });
+}
+
+const createBenchmarkResult = (
+  benchmarkName,
+  runResults,
+  configFlags
+) => {
+  const timeTaken = runResults.reduce((sum, r) => sum + r.timeTaken, 0)
+  const excludedTime = runResults.reduce((sum, r) => sum + r.excludedTime, 0)
+  const iterations = runResults.reduce((sum, r) => sum + r.iterations, 0)
+  const measuredTime = timeTaken - excludedTime
+
+  return ({
+    benchmarkName,
+    runResults,
+    configFlags,
+    timeTaken,
+    excludedTime,
+    iterations,
+    measuredTime,
+    averageTimePerIteration: iterations > 0 ? measuredTime / iterations : 0
+  });
+}
+
+// Create separate trackers for sync and async
+const createSyncTracker = (iterations) => ({
+  remainingIterations: iterations,
+  excludedTime: 0,
+
+  measureRepeated (fn) {
+    while (this.remainingIterations > 0) {
+      fn()
+      this.remainingIterations--
+    }
+  },
+
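+  // Executes fn but accumulates its wall-clock time in excludedTime, which
+  // createRunResult later subtracts from the run's total to get measuredTime.
+  // (The async tracker below does the same for promise-returning callbacks.)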
+  runWithTimingDisabled (fn) {
+    const startTime = performance.now()
+    try {
+      return fn()
+    } finally {
+      const endTime = performance.now()
+      this.excludedTime += (endTime - startTime)
+    }
+  }
+})
+
+
+const createAsyncTracker = (iterations) => ({
+  remainingIterations: iterations,
+  excludedTime: 0,
+
+  async measureRepeated (fn) {
+    while (this.remainingIterations > 0) {
+      await fn()
+      this.remainingIterations--
+    }
+  },
+
+  async runWithTimingDisabled (fn) {
+    const startTime = performance.now()
+    try {
+      return await fn()
+    } finally {
+      const endTime = performance.now()
+      this.excludedTime += (endTime - startTime)
+    }
+  }
+})
+
+// Benchmark runner factory
+export const createBenchmarkRunner = (config = {}) => {
+  const {
+    configFlags = new Set(),
+    warmupIterations = 1000,
+    iterationsPerRun = 25000,
+    numberOfRuns = 5
+  } = config
+
+  const cleanup = () => {
+    return new Promise(resolve => setTimeout(resolve, 100))
+  }
+
+  // Run sync benchmark
+  const runSyncBenchmark = async (benchmark) => {
+    console.log(`Running sync benchmark: ${benchmark.name}`)
+
+    // Warmup
+    const warmupTracker = createSyncTracker(warmupIterations)
+    benchmark.setup?.()
+    benchmark.run(warmupTracker)
+    benchmark.teardown?.()
+    await cleanup()
+
+    // Main runs
+    const runResults = []
+    for (let i = 0; i < numberOfRuns; i++) {
+      benchmark.setup?.()
+      const tracker = createSyncTracker(iterationsPerRun)
+
+      const startTime = performance.now()
+      benchmark.run(tracker)
+      const endTime = performance.now()
+
+      benchmark.teardown?.()
+
+      runResults.push(createRunResult(
+        (endTime - startTime),
+        tracker.excludedTime,
+        iterationsPerRun,
+      ))
+
+      await cleanup()
+    }
+
+    console.error(`Reporting results for benchmark: ${benchmark.name}`)
+    return createBenchmarkResult(benchmark.name, runResults, configFlags)
+  }
+
+  // Run async benchmark
+  const runAsyncBenchmark = async (benchmark) => {
+    console.error(`Running async benchmark: ${benchmark.name}`)
+
+    // Warmup
+    const warmupTracker = createAsyncTracker(warmupIterations)
+    await benchmark.setup?.()
+    await benchmark.run(warmupTracker)
+    await benchmark.teardown?.()
+    await cleanup()
+
+    // Main runs
+    const runResults = []
+    for (let i = 0; i < numberOfRuns; i++) {
+      await benchmark.setup?.()
+      const tracker = createAsyncTracker(iterationsPerRun)
+
+      const startTime = performance.now()
+      await benchmark.run(tracker)
+      const endTime = performance.now()
+
+      await benchmark.teardown?.()
+
+      runResults.push(createRunResult(
+        (endTime - startTime),
+        tracker.excludedTime,
+        iterationsPerRun,
+      ))
+
+      await cleanup()
+    }
+
+    console.error(`Reporting results for async benchmark: ${benchmark.name}`)
+    return createBenchmarkResult(benchmark.name, runResults, configFlags)
+  }
+
+  return {
+    /**
+     * Run a Benchmark and report the results as a Promise.
+     *
+     * ```typescript
+     * interface Benchmark {
+     *   name: string
+     *   run: (tracker: BenchmarkTracker) => Promise<void> | void
+     *   setup?: () => Promise<void>
+     *   teardown?: () => Promise<void>
+     *   isAsync: boolean // true if run returns a promise, false to measure synchronous code
+     * }
+     * ```
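+     *
+     * A hypothetical usage sketch (SpanWithAttributesBenchmark is one of the
+     * benchmark modules re-exported from ./core):
+     *
+     * ```typescript
+     * const runner = createBenchmarkRunner({ iterationsPerRun: 1000, numberOfRuns: 2 })
+     * const results = await runner.runBenchmark(SpanWithAttributesBenchmark)
+     * console.log(results.averageTimePerIteration)
+     * ```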
+     *
+     * @param benchmark
+     * @returns {Promise<{benchmarkName, runResults, configFlags, timeTaken, excludedTime, iterations, measuredTime, averageTimePerIteration}>}
+     */
+    runBenchmark: (benchmark) => {
+      return benchmark.isAsync
+        ? runAsyncBenchmark(benchmark)
+        : runSyncBenchmark(benchmark)
+    }
+  }
+}
diff --git a/test/react-native/scenario-launcher/benchmarks/core/NativeNamedSpanBenchmark.js b/test/react-native/scenario-launcher/benchmarks/core/NativeNamedSpanBenchmark.js
new file mode 100644
index 000000000..29e9a52e3
--- /dev/null
+++ b/test/react-native/scenario-launcher/benchmarks/core/NativeNamedSpanBenchmark.js
@@ -0,0 +1,24 @@
+import BugsnagPerformance from '@bugsnag/react-native-performance'
+import { NativeScenarioLauncher } from '../../lib/native'
+import { NativeSpanQuery } from "@bugsnag/plugin-react-native-span-access";
+
+export const name = 'NativeNamedSpanBenchmark'
+export const isAsync = true
+export const run = (benchmarkTracker) => {
+  let spanCount = 1
+  return benchmarkTracker.measureRepeated(async () => {
+    const spanName = `NativeTestSpan[${spanCount}]`
+    spanCount++
+
+    // this benchmark doesn't consider the overhead of creating the native span, only retrieving, updating & ending it
+    await benchmarkTracker.runWithTimingDisabled(() => NativeScenarioLauncher.startNativeSpan({
+      name: spanName,
+    }))
+
+    const nativeSpanControls = BugsnagPerformance.getSpanControls(new NativeSpanQuery(spanName))
+    await nativeSpanControls.updateSpan((mutator) => {
+      mutator.setAttribute('test.remote.attribute', spanName)
+      mutator.end(performance.now())
+    })
+  })
+}
diff --git a/test/react-native/scenario-launcher/benchmarks/core/SpanWithAttributesBenchmark.js b/test/react-native/scenario-launcher/benchmarks/core/SpanWithAttributesBenchmark.js
new file mode 100644
index 000000000..df8773eb3
--- /dev/null
+++ b/test/react-native/scenario-launcher/benchmarks/core/SpanWithAttributesBenchmark.js
@@ -0,0 +1,14 @@
+import BugsnagPerformance from '@bugsnag/react-native-performance'
+
+export const name = 'SpanWithAttributesBenchmark'
+export const isAsync = false
+export const run = (benchmarkTracker) => {
+  benchmarkTracker.measureRepeated(() => {
+    const span = BugsnagPerformance.startSpan()
+    span.setAttribute('custom.string', 'abc123')
+    span.setAttribute('custom.int', 123)
+    span.setAttribute('custom.number', 123.321)
+    span.setAttribute('custom.bool', false)
+    span.end()
+  })
+}
diff --git a/test/react-native/scenario-launcher/benchmarks/core/index.js b/test/react-native/scenario-launcher/benchmarks/core/index.js
new file mode 100644
index 000000000..7bd1a33f3
--- /dev/null
+++ b/test/react-native/scenario-launcher/benchmarks/core/index.js
@@ -0,0 +1,2 @@
+export * as SpanWithAttributesBenchmark from './SpanWithAttributesBenchmark'
+export * as NativeNamedSpanBenchmark from './NativeNamedSpanBenchmark'
diff --git a/test/react-native/scenario-launcher/benchmarks/index.js b/test/react-native/scenario-launcher/benchmarks/index.js
new file mode 100644
index 000000000..e088d287c
--- /dev/null
+++ b/test/react-native/scenario-launcher/benchmarks/index.js
@@ -0,0 +1,106 @@
+import { NativeScenarioLauncher } from '../lib/native'
+import { createBenchmarkRunner } from './BenchmarkRunner'
+import * as Benchmarks from './core'
+import BugsnagPerformance from '@bugsnag/react-native-performance';
+import { BugsnagJavascriptSpansPlugin, BugsnagNativeSpansPlugin } from "@bugsnag/plugin-react-native-span-access";
+
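+// POSTs one flat JSON object per benchmark to maze-runner's /metrics endpoint;
+// nested run results become indexed keys such as "timeTaken.0".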
+const reportBenchmarkResults = async (results, mazeAddress) => {
+  const url = `http://${mazeAddress}/metrics`
+
+  // Human-readable timestamp, e.g. "Wed Oct 01 2025 12:00:00 GMT+0100"
+  const timestamp = new Date().toString()
+
+  // Create flat JSON object
+  const flatResults = {
+    timestamp,
+    benchmark: results.name,
+    totalTimeTaken: results.timeTaken,
+    totalExcludedTime: results.excludedTime,
+    totalMeasuredTime: results.measuredTime,
+    totalIterations: results.iterations,
+  }
+
+  // Add config flags as boolean properties
+  if (results.configFlags) {
+    for (const flag of results.configFlags) {
+      flatResults[flag] = true
+    }
+  }
+
+  // Add individual run results
+  if (results.runResults) {
+    results.runResults.forEach((run, index) => {
+      flatResults[`timeTaken.${index}`] = run.timeTaken
+      flatResults[`excludedTime.${index}`] = run.excludedTime
+      flatResults[`measuredTime.${index}`] = run.measuredTime
+      flatResults[`iterations.${index}`] = run.iterations
+    })
+  }
+
+  try {
+    console.error(`[BugsnagPerformance] Sending benchmark results to ${url}`)
+    const response = await fetch(url, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify(flatResults),
+    })
+
+    if (!response.ok) {
+      console.error(`[BugsnagPerformance] Failed to send benchmark results: ${response.status} ${response.statusText}`)
+    } else {
+      console.error(`[BugsnagPerformance] Successfully sent benchmark results`)
+    }
+  } catch (error) {
+    console.error(`[BugsnagPerformance] Error sending benchmark results: ${error.message}`)
+  }
+}
+
+export const runBenchmark = async (name, config, apiKey, endpoint, mazeAddress) => {
+  const configFlags = new Set(config.split(' '))
+
+  const plugins = []
+
+  if (configFlags.has('nativeSpans')) {
+    plugins.push(new BugsnagNativeSpansPlugin())
+  }
+
+  if (configFlags.has('jsSpans')) {
+    plugins.push(new BugsnagJavascriptSpansPlugin())
+  }
+
+  const configuration = {
+    apiKey,
+    endpoint,
+    nativeSpans: configFlags.has('nativeSpans'),
+    jsSpans: configFlags.has('jsSpans'),
+    enabledMetrics: {
+      rendering: configFlags.has('rendering'),
+      cpu: configFlags.has('cpu'),
+      memory: configFlags.has('memory'),
+    },
+    // benchmarks discard all spans
+    samplingProbability: 0,
+  }
+
+  if (configFlags.has('native')) {
+    await NativeScenarioLauncher.startNativePerformance(configuration)
+    await BugsnagPerformance.attach({
+      plugins
+    });
+  } else {
+    BugsnagPerformance.start(configuration);
+  }
+
+  const runner = createBenchmarkRunner({ configFlags })
+  const results = await runner.runBenchmark(Benchmarks[name])
+
+  // Add benchmark name and config flags to results for reporting
+  results.name = name
+  results.configFlags = configFlags
+
+  await reportBenchmarkResults(results, mazeAddress)
+
+  NativeScenarioLauncher.exitApp()
+}
diff --git a/test/react-native/scenario-launcher/lib/CommandRunner.js b/test/react-native/scenario-launcher/lib/CommandRunner.js
index aeff9bc91..6007870c2 100644
--- a/test/react-native/scenario-launcher/lib/CommandRunner.js
+++ b/test/react-native/scenario-launcher/lib/CommandRunner.js
@@ -1,4 +1,4 @@
-import { getMazeRunnerAddress } from './ConfigFileReader'
+import { getMazeRunnerAddress as readMazeRunnerAddress } from './ConfigFileReader'
 
 const DEFAULT_RETRY_COUNT = 20
 const INTERVAL = 500
@@ -8,6 +8,13 @@ let lastCommandUuid
 
 const delay = ms => new Promise(resolve => setTimeout(resolve, ms))
 
+export const getMazeRunnerAddress = async () => {
+  if (!mazeAddress) {
+    mazeAddress = await readMazeRunnerAddress()
+  }
+  return mazeAddress
+}
+
 export async function getCurrentCommand (allowedRetries = DEFAULT_RETRY_COUNT) {
   if (allowedRetries <= 0) {
     throw new Error(`allowedRetries must be a number >0, got '${allowedRetries}'`)
diff --git a/test/react-native/scenario-launcher/lib/ScenarioLauncher.js b/test/react-native/scenario-launcher/lib/ScenarioLauncher.js
index 8bd6be36d..ab24c5702 100644
--- a/test/react-native/scenario-launcher/lib/ScenarioLauncher.js
+++ b/test/react-native/scenario-launcher/lib/ScenarioLauncher.js
@@ -1,7 +1,8 @@
 import * as Scenarios from '../scenarios'
-import { getCurrentCommand } from './CommandRunner'
+import { getCurrentCommand, getMazeRunnerAddress } from './CommandRunner'
 import { clearPersistedState, setDeviceId, setSamplingProbability } from './Persistence'
 import { NativeScenarioLauncher } from './native'
+import { runBenchmark } from '../benchmarks'
 import { wrapperComponentProvider } from '../scenarios/core/WrapperComponentProviderScenario'
 import React from 'react'
 import BugsnagPerformance from '@bugsnag/react-native-performance'
@@ -32,7 +33,7 @@ async function runScenario (setScenario, scenarioName, apiKey, endpoint) {
   if (!scenario.doNotStartBugsnagPerformance) {
     BugsnagPerformance.start(scenarioConfig)
   }
-  
+
   setScenario({ name: scenarioName, config: scenarioConfig })
 }
@@ -51,6 +52,15 @@ export async function launchScenario (setScenario, clearPersistedData = true) {
         command.endpoint
       )
 
+    case 'run-benchmark':
+      return await runBenchmark(
+        command.benchmark_name,
+        command.config,
+        command.api_key,
+        command.endpoint,
+        await getMazeRunnerAddress()
+      )
+
     case 'clear-all-persistent-data':
       return await launchScenario(setScenario, true)
diff --git a/test/react-native/scenario-launcher/package.json b/test/react-native/scenario-launcher/package.json
index 64e289ae3..fd011bec3 100644
--- a/test/react-native/scenario-launcher/package.json
+++ b/test/react-native/scenario-launcher/package.json
@@ -29,6 +29,7 @@
     "/android",
     "/ios",
     "/lib",
+    "/benchmarks",
     "/scenarios/core",
     "/scenarios/index.js",
     "/ScenarioLauncher.podspec"
diff --git a/test/react-native/scripts/utils/android-utils.js b/test/react-native/scripts/utils/android-utils.js
index 109179cd8..3e1ac5070 100644
--- a/test/react-native/scripts/utils/android-utils.js
+++ b/test/react-native/scripts/utils/android-utils.js
@@ -9,7 +9,7 @@ const { replaceInFile, appendToFileIfNotExists } = require('./file-utils')
 function configureAndroidProject (fixtureDir, isNewArchEnabled, reactNativeVersion) {
   // set android:usesCleartextTraffic="true" in AndroidManifest.xml
   const androidManifestPath = `${fixtureDir}/android/app/src/main/AndroidManifest.xml`
-  replaceInFile(androidManifestPath, '