diff --git a/internal/benchmark/README.md b/internal/benchmark/README.md new file mode 100644 index 00000000000..ec9c5ee61ca --- /dev/null +++ b/internal/benchmark/README.md @@ -0,0 +1,110 @@ +# UI5 CLI Benchmark Tool + +A benchmarking utility for measuring and comparing the performance of UI5 CLI commands across different git revisions. + +## Prerequisites + +This tool requires [hyperfine](https://github.com/sharkdp/hyperfine) to be installed. Follow the installation instructions in the hyperfine repository for your platform. + +## Installation + +Make the `ui5-cli-benchmark` binary available globally: + +```bash +npm link +``` + +## Usage + +```bash +ui5-cli-benchmark run [...] +``` + +### Arguments + +- `` - Path to a YAML configuration file (required) +- `[...]` - One or more project directories to benchmark (optional, defaults to current working directory) + +### Example + +```bash +# Run benchmarks using example config in current directory +ui5-cli-benchmark run config/.example.yaml + +# Run benchmarks in specific project directories +ui5-cli-benchmark run config/.example.yaml /path/to/project1 /path/to/project2 +``` + +## Configuration + +Create a YAML configuration file with the following structure: + +### Revisions + +Define the git revisions to benchmark: + +```yaml +revisions: + baseline: + name: "Baseline" + revision: + merge_base_from: "feat/example-feature" + target_branch: "main" + example_feature: + name: "Example Feature" + revision: "feat/example-feature" +``` + +Each revision can specify: +- `name` - Display name for the revision +- `revision` - Either a branch/commit hash or an object with `merge_base_from` and `target_branch` to compute the merge base + +### Hyperfine Settings + +Configure the benchmark runner (uses [hyperfine](https://github.com/sharkdp/hyperfine)): + +```yaml +hyperfine: + warmup: 1 # Number of warmup runs + runs: 10 # Number of benchmark runs +``` + +### Groups + +Define logical groups for organizing benchmark results: + 
+```yaml +groups: + build: + name: "ui5 build" +``` + +### Benchmarks + +Define the commands to benchmark: + +```yaml +benchmarks: + - command: "build" + prepare: "rm -rf .ui5-cache" # Optional: command to run before each benchmark + groups: + build: + name: "build" + revisions: # Optional: limit to specific revisions + - "example_feature" +``` + +Each benchmark can specify: +- `command` - The UI5 CLI command to run (e.g., "build", "build --clean-dest") +- `prepare` - Optional shell command to run before each benchmark iteration +- `groups` - Group(s) this benchmark belongs to with display names +- `revisions` - Optional array to limit which revisions run this benchmark (defaults to all) + +## Output + +The tool generates: +- Console output with progress and summary +- Markdown report with benchmark results +- JSON report with raw data + +Results are organized by revision and group for easy comparison. diff --git a/internal/benchmark/cli.js b/internal/benchmark/cli.js new file mode 100755 index 00000000000..df515e9d74a --- /dev/null +++ b/internal/benchmark/cli.js @@ -0,0 +1,99 @@ +#!/usr/bin/env node + +import {fileURLToPath} from "node:url"; +import path from "node:path"; +import fs from "node:fs"; +import BenchmarkRunner from "./lib/BenchmarkRunner.js"; +import git from "./lib/utils/git.js"; +import npm from "./lib/utils/npm.js"; +import {spawnProcess} from "./lib/utils/process.js"; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +function printUsageAndExit() { + console.error( + "Usage:\n\t" + + "ui5-cli-benchmark run [...]" + ); + process.exit(1); +} + +export const commands = { + async run(args, options = {}) { + const configFilePath = args[0]; + const projectDirs = args.slice(1); + + // Validate arguments + if (!configFilePath) { + return printUsageAndExit(); + } + + // Determine repository and CLI paths + const repositoryPath = path.resolve(__dirname, "../.."); + const ui5CliPath = 
path.resolve(repositoryPath, "packages/cli/bin/ui5.cjs"); + + // Create BenchmarkRunner with injected dependencies + const benchmarkRunner = new BenchmarkRunner({ + git: options.git || git, + npm: options.npm || npm, + spawnProcess: options.spawnProcess || spawnProcess, + fs: options.fs || fs + }); + + // Run benchmarks + const result = await benchmarkRunner.run({ + configFilePath, + repositoryPath, + ui5CliPath, + projectDirs: projectDirs.length > 0 ? projectDirs : undefined, + timestamp: options.timestamp + }); + + if (!result.success) { + process.exit(1); + } + } +}; + +async function main() { + const args = process.argv.slice(2); + + if (args.length === 0 || args[0] === "-h" || args[0] === "--help") { + return printUsageAndExit(); + } + + const command = args[0]; + const commandArgs = args.slice(1); + const fn = commands[command]; + + // Validate command name + if (!fn) { + process.stderr.write(`Unknown command: '${command}'\n\n`); + return process.exit(1); + } + + // Execute handler + try { + await fn(commandArgs); + } catch (error) { + console.error(`Unexpected error: ${error.message}`); + console.error("Stack trace:", error.stack); + + process.exit(1); + } +} + + +// Handle uncaught exceptions +process.on("uncaughtException", (error) => { + console.error("Uncaught exception:", error.message); + process.exit(1); +}); + +process.on("unhandledRejection", (reason, promise) => { + console.error("Unhandled rejection at:", promise, "reason:", reason); + process.exit(1); +}); + +main(); diff --git a/internal/benchmark/config/.example.yaml b/internal/benchmark/config/.example.yaml new file mode 100644 index 00000000000..73f831889fc --- /dev/null +++ b/internal/benchmark/config/.example.yaml @@ -0,0 +1,35 @@ +# Example Benchmark configuration for UI5 CLI + +revisions: + baseline: + name: "Baseline" + revision: + merge_base_from: "feat/example-feature" + target_branch: "main" + example_feature: + name: "Example Feature" + revision: "feat/example-feature" + +hyperfine: 
+ warmup: 1 + runs: 10 + +groups: + build: + name: "ui5 build" + +benchmarks: + + - command: "build" + groups: + build: + name: "build" + + - command: "build --some-new-flag" + groups: + build: + name: "build (with some new flag)" + revisions: + # Benchmark with some new flag is only relevant for the feature revision, + # as the baseline does not contain the flag + - "example_feature" diff --git a/internal/benchmark/eslint.config.js b/internal/benchmark/eslint.config.js new file mode 100644 index 00000000000..e72630b9687 --- /dev/null +++ b/internal/benchmark/eslint.config.js @@ -0,0 +1,10 @@ +import commonConfig from "../../eslint.common.config.js"; + +export default [ + ...commonConfig, + { + rules: { + "no-console": "off" // Allow console output in CLI tools + } + } +]; diff --git a/internal/benchmark/lib/BenchmarkRunner.js b/internal/benchmark/lib/BenchmarkRunner.js new file mode 100644 index 00000000000..414a6373ae5 --- /dev/null +++ b/internal/benchmark/lib/BenchmarkRunner.js @@ -0,0 +1,263 @@ +import path from "node:path"; +import ConfigurationLoader from "./ConfigurationLoader.js"; +import RevisionResolver from "./services/RevisionResolver.js"; +import ExecutionPlanner from "./services/ExecutionPlanner.js"; +import HyperfineRunner from "./services/HyperfineRunner.js"; +import ResultAggregator from "./services/ResultAggregator.js"; +import MarkdownReporter from "./reporters/MarkdownReporter.js"; +import JsonReporter from "./reporters/JsonReporter.js"; + +/** + * Main orchestrator for running benchmarks. + * Coordinates all services and manages the complete benchmark workflow. 
+ */ +export default class BenchmarkRunner { + /** + * @param {object} dependencies - External dependencies (for dependency injection) + * @param {object} dependencies.git - Git utility module + * @param {object} dependencies.npm - npm utility module + * @param {Function} dependencies.spawnProcess - Process spawning function + * @param {object} dependencies.fs - File system module + */ + constructor({git, npm, spawnProcess, fs}) { + this.#git = git; + this.#npm = npm; + this.#spawnProcess = spawnProcess; + this.#fs = fs; + + // Initialize services with dependencies + this.#configLoader = new ConfigurationLoader(fs); + this.#revisionResolver = new RevisionResolver(git); + this.#executionPlanner = new ExecutionPlanner(); + this.#resultAggregator = new ResultAggregator(fs); + this.#markdownReporter = new MarkdownReporter(); + this.#jsonReporter = new JsonReporter(); + } + + #git; + #npm; + #spawnProcess; + #fs; + #configLoader; + #revisionResolver; + #executionPlanner; + #resultAggregator; + #markdownReporter; + #jsonReporter; + + /** + * Run benchmarks based on configuration. 
+ * + * @param {object} options - Execution options + * @param {string} options.configFilePath - Path to YAML configuration file + * @param {string} options.repositoryPath - Path to the UI5 CLI repository + * @param {string} options.ui5CliPath - Path to the UI5 CLI executable + * @param {string[]} [options.projectDirs] - Project directories to benchmark (defaults to cwd) + * @param {Date} [options.timestamp] - Timestamp for the run (defaults to now) + * @returns {Promise<{success: boolean, failures: Array}>} + */ + async run({ + configFilePath, + repositoryPath, + ui5CliPath, + projectDirs = [process.cwd()], + timestamp = new Date() + }) { + console.log("=".repeat(80)); + console.log("UI5 CLI Benchmark Tool"); + console.log("=".repeat(80)); + console.log(); + + // Validate repository state + await this.#validateRepository(repositoryPath); + + // Load and validate configuration + console.log(`Loading configuration from: ${configFilePath}`); + const config = this.#configLoader.load(configFilePath); + console.log(`✓ Configuration loaded successfully\n`); + + // Resolve all revisions + console.log("Resolving revisions..."); + const resolvedRevisions = await this.#revisionResolver.resolveAll(config, repositoryPath); + for (const [key, info] of resolvedRevisions.entries()) { + console.log(` ${info.name} (${key}): ${info.commitHash}`); + } + console.log(`✓ Resolved ${resolvedRevisions.size} revision(s)\n`); + + // Plan execution + console.log("Planning benchmark execution..."); + const executionPlan = this.#executionPlanner.plan(config, resolvedRevisions); + console.log(this.#executionPlanner.getSummary(executionPlan)); + + // Run benchmarks for each project directory + const allFailures = []; + for (const projectDir of projectDirs) { + console.log("=".repeat(80)); + console.log(`Benchmarking project: ${projectDir}`); + console.log("=".repeat(80)); + console.log(); + + const result = await this.#runForProject({ + config, + executionPlan, + repositoryPath, + ui5CliPath, + 
projectDir, + timestamp + }); + + allFailures.push(...result.failures); + } + + console.log("\n" + "=".repeat(80)); + if (allFailures.length === 0) { + console.log("✅ All benchmarks completed successfully!"); + } else { + console.log(`⚠️ Completed with ${allFailures.length} failure(s)`); + } + console.log("=".repeat(80) + "\n"); + + return { + success: allFailures.length === 0, + failures: allFailures + }; + } + + /** + * Validate that the repository is in a clean state. + * + * @param {string} repositoryPath - Path to repository + * @returns {Promise} + */ + async #validateRepository(repositoryPath) { + console.log(`Checking repository status: ${repositoryPath}`); + const gitStatus = await this.#git.checkGitStatus(repositoryPath); + if (gitStatus) { + throw new Error( + "Repository has uncommitted changes. " + + "Please commit or stash your changes before running benchmarks." + ); + } + console.log("✓ Repository is clean\n"); + } + + /** + * Run benchmarks for a single project directory. 
+ * + * @param {object} params - Execution parameters + * @param {Configuration} params.config - Configuration + * @param {Map} params.executionPlan - Execution plan + * @param {string} params.repositoryPath - UI5 CLI repository path + * @param {string} params.ui5CliPath - UI5 CLI executable path + * @param {string} params.projectDir - Project directory to benchmark + * @param {Date} params.timestamp - Benchmark timestamp + * @returns {Promise<{failures: Array}>} + */ + async #runForProject({ + config, + executionPlan, + repositoryPath, + ui5CliPath, + projectDir, + timestamp + }) { + // Get project revision (if it's a git repository) + const projectRevision = await this.#git.getProjectRevision(projectDir); + if (projectRevision) { + console.log(`Project Git Revision: ${projectRevision}\n`); + } else { + console.log("Project is not a git repository\n"); + } + + // Create HyperfineRunner instance for this project + const hyperfineRunner = new HyperfineRunner({ + git: this.#git, + npm: this.#npm, + spawnProcess: this.#spawnProcess, + ui5CliPath + }); + + // Execute benchmarks for each revision + const executionResults = new Map(); + + for (const [revisionKey, revisionPlan] of executionPlan.entries()) { + const result = await hyperfineRunner.run({ + revisionPlan, + hyperfineOptions: { + warmup: config.warmup, + runs: config.runs + }, + repositoryPath, + workingDirectory: projectDir + }); + + executionResults.set(revisionKey, result); + } + + // Aggregate results + console.log("\nAggregating results..."); + const aggregatedResults = this.#resultAggregator.aggregate({ + config, + executionPlan, + executionResults + }); + console.log(`✓ Results aggregated\n`); + + // Generate reports + await this.#generateReports({ + aggregatedResults, + projectRevision, + projectDir, + timestamp + }); + + return { + failures: aggregatedResults.failures + }; + } + + /** + * Generate and save reports. 
+ * + * @param {object} params - Report parameters + * @param {object} params.aggregatedResults - Aggregated results + * @param {string} params.projectRevision - Project git revision + * @param {string} params.projectDir - Project directory + * @param {Date} params.timestamp - Benchmark timestamp + * @returns {Promise} + */ + async #generateReports({aggregatedResults, projectRevision, projectDir, timestamp}) { + console.log("Generating reports..."); + + // Generate Markdown report + const markdown = this.#markdownReporter.generate({ + aggregatedResults, + projectRevision, + workingDirectory: projectDir, + timestamp + }); + + const markdownPath = path.resolve( + projectDir, + `benchmark-summary-${timestamp.toISOString().replace(/[:.]/g, "-")}.md` + ); + this.#fs.writeFileSync(markdownPath, markdown, "utf8"); + console.log(` ✓ Markdown report: ${markdownPath}`); + + // Generate JSON report + const json = this.#jsonReporter.generate({ + aggregatedResults, + projectRevision, + timestamp + }); + + const jsonPath = path.resolve( + projectDir, + `benchmark-results-${timestamp.toISOString().replace(/[:.]/g, "-")}.json` + ); + this.#fs.writeFileSync(jsonPath, json, "utf8"); + console.log(` ✓ JSON report: ${jsonPath}`); + + console.log("✓ Reports generated\n"); + } +} diff --git a/internal/benchmark/lib/ConfigurationLoader.js b/internal/benchmark/lib/ConfigurationLoader.js new file mode 100644 index 00000000000..609245b483d --- /dev/null +++ b/internal/benchmark/lib/ConfigurationLoader.js @@ -0,0 +1,55 @@ +import yaml from "js-yaml"; +import Configuration from "./benchmark/Configuration.js"; + +/** + * Loads and parses benchmark configuration from YAML files. + */ +export default class ConfigurationLoader { + /** + * @param {object} fs - File system module (for dependency injection) + */ + constructor(fs) { + this.#fs = fs; + } + + #fs; + + /** + * Load and parse a YAML configuration file. 
+ * + * @param {string} configFilePath - Absolute path to the YAML configuration file + * @returns {Configuration} + */ + load(configFilePath) { + if (!configFilePath || typeof configFilePath !== "string") { + throw new Error("Configuration file path must be a non-empty string"); + } + + let fileContents; + try { + fileContents = this.#fs.readFileSync(configFilePath, "utf8"); + } catch (error) { + if (error.code === "ENOENT") { + throw new Error(`Configuration file not found: ${configFilePath}`); + } + throw new Error(`Failed to read configuration file: ${error.message}`); + } + + let parsedYaml; + try { + parsedYaml = yaml.load(fileContents); + } catch (error) { + throw new Error(`Failed to parse YAML configuration: ${error.message}`); + } + + if (!parsedYaml || typeof parsedYaml !== "object") { + throw new Error("Configuration file must contain a valid YAML object"); + } + + try { + return new Configuration(parsedYaml); + } catch (error) { + throw new Error(`Invalid configuration: ${error.message}`); + } + } +} diff --git a/internal/benchmark/lib/benchmark/BenchmarkSpec.js b/internal/benchmark/lib/benchmark/BenchmarkSpec.js new file mode 100644 index 00000000000..30d98670cde --- /dev/null +++ b/internal/benchmark/lib/benchmark/BenchmarkSpec.js @@ -0,0 +1,122 @@ +/** + * Represents a single benchmark specification with its command, preparation, + * group memberships, and revision restrictions. 
+ */ +export default class BenchmarkSpec { + /** + * @param {object} config - The benchmark configuration object + * @param {string} config.command - The UI5 CLI command to benchmark (e.g., "build") + * @param {string} [config.prepare] - Optional shell command to run before each benchmark + * @param {object} config.groups - Map of group keys to group-specific config + * @param {string} config.groups[].name - Display name for this benchmark in the group + * @param {string[]} [config.revisions] - Optional list of revision keys this benchmark should run on + * @param {number} index - The index of this benchmark in the configuration (for ordering) + */ + constructor(config, index) { + if (!config || typeof config !== "object") { + throw new Error("Benchmark configuration must be an object"); + } + if (!config.command || typeof config.command !== "string") { + throw new Error("Benchmark must have a command string"); + } + if (config.prepare !== undefined && typeof config.prepare !== "string") { + throw new Error("Benchmark prepare must be a string if provided"); + } + if (!config.groups || typeof config.groups !== "object" || Object.keys(config.groups).length === 0) { + throw new Error("Benchmark must belong to at least one group"); + } + + // Validate group configurations + for (const [groupKey, groupConfig] of Object.entries(config.groups)) { + if (!groupConfig || typeof groupConfig !== "object") { + throw new Error(`Benchmark group '${groupKey}' configuration must be an object`); + } + if (!groupConfig.name || typeof groupConfig.name !== "string") { + throw new Error(`Benchmark group '${groupKey}' must have a name`); + } + } + + // Validate revisions if provided + if (config.revisions !== undefined) { + if (!Array.isArray(config.revisions)) { + throw new Error("Benchmark revisions must be an array if provided"); + } + if (config.revisions.length === 0) { + throw new Error("Benchmark revisions array must not be empty if provided"); + } + for (const rev of 
config.revisions) { + if (typeof rev !== "string") { + throw new Error("Benchmark revision keys must be strings"); + } + } + } + + this.#index = index; + this.#command = config.command; + this.#prepare = config.prepare || null; + this.#groupMemberships = new Map(Object.entries(config.groups)); + this.#revisionKeys = config.revisions ? [...config.revisions] : null; + } + + #index; + #command; + #prepare; + #groupMemberships; // Map + #revisionKeys; // null means all revisions, otherwise array of revision keys + + get index() { + return this.#index; + } + + get command() { + return this.#command; + } + + get prepare() { + return this.#prepare; + } + + get groupMemberships() { + return new Map(this.#groupMemberships); + } + + get revisionKeys() { + return this.#revisionKeys ? [...this.#revisionKeys] : null; + } + + /** + * Check if this benchmark should run on a specific revision + * + * @param {string} revisionKey - The revision key to check + * @returns {boolean} + */ + shouldRunOnRevision(revisionKey) { + if (this.#revisionKeys === null) { + return true; // No restriction, runs on all revisions + } + return this.#revisionKeys.includes(revisionKey); + } + + /** + * Get the display name for this benchmark in a specific group + * + * @param {string} groupKey - The group key + * @returns {string} + */ + getGroupDisplayName(groupKey) { + const groupConfig = this.#groupMemberships.get(groupKey); + if (!groupConfig) { + throw new Error(`Benchmark is not a member of group '${groupKey}'`); + } + return groupConfig.name; + } + + /** + * Get all group keys this benchmark belongs to + * + * @returns {string[]} + */ + getGroupKeys() { + return Array.from(this.#groupMemberships.keys()); + } +} diff --git a/internal/benchmark/lib/benchmark/Configuration.js b/internal/benchmark/lib/benchmark/Configuration.js new file mode 100644 index 00000000000..618869b1256 --- /dev/null +++ b/internal/benchmark/lib/benchmark/Configuration.js @@ -0,0 +1,162 @@ +import Revision from 
"./Revision.js"; +import Group from "./Group.js"; +import BenchmarkSpec from "./BenchmarkSpec.js"; + +/** + * Represents the complete benchmark configuration with revisions, groups, and benchmarks. + */ +export default class Configuration { + /** + * @param {object} config - The parsed YAML configuration object + * @param {object} config.revisions - Map of revision keys to revision configs + * @param {object} config.groups - Map of group keys to group configs + * @param {object} config.hyperfine - Hyperfine execution options + * @param {number} config.hyperfine.warmup - Number of warmup runs + * @param {number} config.hyperfine.runs - Number of benchmark runs + * @param {Array} config.benchmarks - Array of benchmark specifications + */ + constructor(config) { + if (!config || typeof config !== "object") { + throw new Error("Configuration must be an object"); + } + + // Validate and parse hyperfine options + if (!config.hyperfine || typeof config.hyperfine !== "object") { + throw new Error("Configuration must have a 'hyperfine' section"); + } + if (typeof config.hyperfine.warmup !== "number" || config.hyperfine.warmup < 0) { + throw new Error("hyperfine.warmup must be a non-negative number"); + } + if (typeof config.hyperfine.runs !== "number" || config.hyperfine.runs < 1) { + throw new Error("hyperfine.runs must be a positive number"); + } + + this.#warmup = config.hyperfine.warmup; + this.#runs = config.hyperfine.runs; + + // Validate and parse revisions + if (!config.revisions || typeof config.revisions !== "object" || + Object.keys(config.revisions).length === 0) { + throw new Error("Configuration must have at least one revision"); + } + + this.#revisions = new Map(); + for (const [key, revConfig] of Object.entries(config.revisions)) { + this.#revisions.set(key, new Revision(key, revConfig)); + } + + // Validate and parse groups + if (!config.groups || typeof config.groups !== "object" || + Object.keys(config.groups).length === 0) { + throw new 
Error("Configuration must have at least one group"); + } + + this.#groups = new Map(); + for (const [key, groupConfig] of Object.entries(config.groups)) { + this.#groups.set(key, new Group(key, groupConfig)); + } + + // Validate and parse benchmarks + if (!Array.isArray(config.benchmarks) || config.benchmarks.length === 0) { + throw new Error("Configuration must have at least one benchmark"); + } + + this.#benchmarks = config.benchmarks.map((benchConfig, index) => { + const spec = new BenchmarkSpec(benchConfig, index); + + // Validate that all referenced groups exist + for (const groupKey of spec.getGroupKeys()) { + if (!this.#groups.has(groupKey)) { + throw new Error( + `Benchmark ${index} references unknown group '${groupKey}'` + ); + } + } + + // Validate that all referenced revisions exist + if (spec.revisionKeys !== null) { + for (const revKey of spec.revisionKeys) { + if (!this.#revisions.has(revKey)) { + throw new Error( + `Benchmark ${index} references unknown revision '${revKey}'` + ); + } + } + } + + return spec; + }); + } + + #revisions; // Map + #groups; // Map + #benchmarks; // Array + #warmup; + #runs; + + get revisions() { + return new Map(this.#revisions); + } + + get groups() { + return new Map(this.#groups); + } + + get benchmarks() { + return [...this.#benchmarks]; + } + + get warmup() { + return this.#warmup; + } + + get runs() { + return this.#runs; + } + + /** + * Get a revision by key + * + * @param {string} key - The revision key + * @returns {Revision} + */ + getRevision(key) { + const revision = this.#revisions.get(key); + if (!revision) { + throw new Error(`Unknown revision key: ${key}`); + } + return revision; + } + + /** + * Get a group by key + * + * @param {string} key - The group key + * @returns {Group} + */ + getGroup(key) { + const group = this.#groups.get(key); + if (!group) { + throw new Error(`Unknown group key: ${key}`); + } + return group; + } + + /** + * Get all revision keys + * + * @returns {string[]} + */ + 
getRevisionKeys() { + return Array.from(this.#revisions.keys()); + } + + /** + * Get all group keys + * + * @returns {string[]} + */ + getGroupKeys() { + return Array.from(this.#groups.keys()); + } +} diff --git a/internal/benchmark/lib/benchmark/Group.js b/internal/benchmark/lib/benchmark/Group.js new file mode 100644 index 00000000000..254f1d5bafa --- /dev/null +++ b/internal/benchmark/lib/benchmark/Group.js @@ -0,0 +1,35 @@ +/** + * Represents a benchmark group used for organizing and comparing results. + */ +export default class Group { + /** + * @param {string} key - The unique identifier for this group (from YAML key) + * @param {object} config - The group configuration object + * @param {string} config.name - Display name for this group + */ + constructor(key, config) { + if (!key || typeof key !== "string") { + throw new Error("Group key must be a non-empty string"); + } + if (!config || typeof config !== "object") { + throw new Error(`Group '${key}' configuration must be an object`); + } + if (!config.name || typeof config.name !== "string") { + throw new Error(`Group '${key}' must have a name`); + } + + this.#key = key; + this.#name = config.name; + } + + #key; + #name; + + get key() { + return this.#key; + } + + get name() { + return this.#name; + } +} diff --git a/internal/benchmark/lib/benchmark/Revision.js b/internal/benchmark/lib/benchmark/Revision.js new file mode 100644 index 00000000000..e5de7d84a19 --- /dev/null +++ b/internal/benchmark/lib/benchmark/Revision.js @@ -0,0 +1,96 @@ +/** + * Represents a revision configuration that can be either a direct git reference + * or a merge-base derived reference. 
+ */ +export default class Revision { + /** + * @param {string} key - The unique identifier for this revision (from YAML key) + * @param {object} config - The revision configuration object + * @param {string} config.name - Display name for this revision + * @param {string|object} config.revision - Either a git ref string or merge-base config object + * @param {string} [config.revision.merge_base_from] - Branch to find merge base from + * @param {string} [config.revision.target_branch] - Target branch for merge base calculation + */ + constructor(key, config) { + if (!key || typeof key !== "string") { + throw new Error("Revision key must be a non-empty string"); + } + if (!config || typeof config !== "object") { + throw new Error(`Revision '${key}' configuration must be an object`); + } + if (!config.name || typeof config.name !== "string") { + throw new Error(`Revision '${key}' must have a name`); + } + if (!config.revision) { + throw new Error(`Revision '${key}' must have a revision definition`); + } + + this.#key = key; + this.#name = config.name; + + // Parse revision definition + if (typeof config.revision === "string") { + this.#type = "direct"; + this.#gitReference = config.revision; + } else if (typeof config.revision === "object") { + if (!config.revision.merge_base_from || !config.revision.target_branch) { + throw new Error( + `Revision '${key}' with merge_base must specify both 'merge_base_from' and 'target_branch'` + ); + } + this.#type = "merge_base"; + this.#mergeBaseFrom = config.revision.merge_base_from; + this.#targetBranch = config.revision.target_branch; + } else { + throw new Error(`Revision '${key}' has invalid revision type`); + } + } + + #key; + #name; + #type; // "direct" or "merge_base" + #gitReference; // For direct type + #mergeBaseFrom; // For merge_base type + #targetBranch; // For merge_base type + + get key() { + return this.#key; + } + + get name() { + return this.#name; + } + + get type() { + return this.#type; + } + + get 
gitReference() { + if (this.#type !== "direct") { + throw new Error(`Revision '${this.#key}' is not a direct reference`); + } + return this.#gitReference; + } + + get mergeBaseFrom() { + if (this.#type !== "merge_base") { + throw new Error(`Revision '${this.#key}' is not a merge_base reference`); + } + return this.#mergeBaseFrom; + } + + get targetBranch() { + if (this.#type !== "merge_base") { + throw new Error(`Revision '${this.#key}' is not a merge_base reference`); + } + return this.#targetBranch; + } + + isDirect() { + return this.#type === "direct"; + } + + isMergeBase() { + return this.#type === "merge_base"; + } +} diff --git a/internal/benchmark/lib/reporters/JsonReporter.js b/internal/benchmark/lib/reporters/JsonReporter.js new file mode 100644 index 00000000000..7ff57365427 --- /dev/null +++ b/internal/benchmark/lib/reporters/JsonReporter.js @@ -0,0 +1,91 @@ +/** + * Generates JSON reports from aggregated benchmark results. + */ +export default class JsonReporter { + /** + * Generate a JSON report from aggregated results. + * + * @param {object} params - Report parameters + * @param {object} params.aggregatedResults - Aggregated benchmark results + * @param {string} params.projectRevision - Git revision of the project being benchmarked + * @param {Date} params.timestamp - Timestamp of the benchmark run + * @returns {string} JSON report as a string + */ + generate({aggregatedResults, projectRevision, timestamp}) { + const report = { + timestamp: timestamp.toISOString(), + projectRevision: projectRevision || null, + revisions: this.#serializeRevisions(aggregatedResults.revisions), + groups: this.#serializeGroups(aggregatedResults.groups), + failures: aggregatedResults.failures.map((failure) => ({ + revisionKey: failure.revisionKey, + revisionName: failure.revisionName, + commitHash: failure.commitHash, + error: failure.error ? 
failure.error.message : null + })) + }; + + return JSON.stringify(report, null, 2); + } + + /** + * Serialize revision results to plain objects. + * + * @param {Map} revisions - Revision results map + * @returns {object} + */ + #serializeRevisions(revisions) { + const result = {}; + + for (const [revisionKey, revisionResult] of revisions.entries()) { + result[revisionKey] = { + name: revisionResult.name, + commitHash: revisionResult.commitHash, + success: revisionResult.success, + error: revisionResult.error ? revisionResult.error.message : null, + benchmarks: revisionResult.benchmarks.map((b) => ({ + index: b.index, + command: b.command, + displayName: b.displayName, + groupKey: b.groupKey, + result: b.hyperfineResult + })) + }; + } + + return result; + } + + /** + * Serialize group results to plain objects. + * + * @param {Map} groups - Group results map + * @returns {object} + */ + #serializeGroups(groups) { + const result = {}; + + for (const [groupKey, groupResult] of groups.entries()) { + result[groupKey] = { + name: groupResult.groupName, + benchmarks: groupResult.benchmarks.map((b) => { + const revisions = {}; + for (const [revKey, revData] of b.revisions.entries()) { + revisions[revKey] = { + name: revData.revisionName, + commitHash: revData.commitHash, + success: revData.success, + result: revData.result + }; + } + return { + displayName: b.displayName, + revisions + }; + }) + }; + } + + return result; + } +} diff --git a/internal/benchmark/lib/reporters/MarkdownReporter.js b/internal/benchmark/lib/reporters/MarkdownReporter.js new file mode 100644 index 00000000000..17ef7100263 --- /dev/null +++ b/internal/benchmark/lib/reporters/MarkdownReporter.js @@ -0,0 +1,258 @@ +/** + * Generates Markdown reports from aggregated benchmark results. + */ +export default class MarkdownReporter { + /** + * Generate a Markdown report from aggregated results. 
+ * + * @param {object} params - Report parameters + * @param {object} params.aggregatedResults - Aggregated benchmark results + * @param {string} params.projectRevision - Git revision of the project being benchmarked + * @param {string} params.workingDirectory - Working directory where benchmarks ran + * @param {Date} params.timestamp - Timestamp of the benchmark run + * @returns {string} Markdown report + */ + generate({aggregatedResults, projectRevision, workingDirectory, timestamp}) { + const {revisions, groups, failures} = aggregatedResults; + + let markdown = "# Benchmark Results\n\n"; + + // Metadata section + markdown += this.#generateMetadata(timestamp, workingDirectory, projectRevision, revisions); + + // Failures section (if any) + if (failures.length > 0) { + markdown += this.#generateFailuresSection(failures); + } + + // Results by group + for (const [groupKey, groupResult] of groups.entries()) { + markdown += this.#generateGroupSection(groupKey, groupResult, revisions); + } + + // Revisions section + markdown += "## Revisions\n\n"; + for (const [revisionKey, revisionResult] of revisions.entries()) { + markdown += `- **${revisionResult.name}** (\`${revisionKey}\`): \`${revisionResult.commitHash}\``; + if (!revisionResult.success) { + markdown += " ❌ Failed"; + } + markdown += "\n"; + } + markdown += "\n"; + + return markdown; + } + + /** + * Generate metadata section. 
+ * + * @param {Date} timestamp - Benchmark timestamp + * @param {string} workingDirectory - Working directory + * @param {string} projectRevision - Project revision + * @param {Map} revisions - Revision results + * @returns {string} + */ + #generateMetadata(timestamp, workingDirectory, projectRevision, revisions) { + let metadata = "**Generated:** " + timestamp.toISOString() + "\n\n"; + metadata += "**Benchmark Directory:** `" + workingDirectory + "`\n\n"; + + if (projectRevision) { + metadata += "**Project Git Revision:** `" + projectRevision + "`\n\n"; + } + + metadata += "**Revisions Benchmarked:**\n"; + for (const [revisionKey, revisionResult] of revisions.entries()) { + metadata += "- " + revisionResult.name + " (`" + revisionKey + "`): `" + + revisionResult.commitHash + "`\n"; + } + metadata += "\n"; + + return metadata; + } + + /** + * Generate failures section. + * + * @param {Array} failures - List of failures + * @returns {string} + */ + #generateFailuresSection(failures) { + let section = "## ⚠️ Failures\n\n"; + section += "The following revisions encountered errors during benchmarking:\n\n"; + + for (const failure of failures) { + section += `### ${failure.revisionName} (\`${failure.revisionKey}\`)\n\n`; + section += `**Commit:** \`${failure.commitHash}\`\n\n`; + section += "**Error:**\n```\n" + failure.error.message + "\n```\n\n"; + } + + return section; + } + + /** + * Generate a section for a specific group. 
+ * + * @param {string} groupKey - Group key + * @param {object} groupResult - Group result data + * @param {Map} revisions - All revision results + * @returns {string} + */ + #generateGroupSection(groupKey, groupResult, revisions) { + let section = `## ${groupResult.groupName}\n\n`; + + if (groupResult.benchmarks.length === 0) { + section += "*No benchmarks in this group.*\n\n"; + return section; + } + + // Build comparison table + section += this.#generateComparisonTable(groupResult, revisions); + + // Add Mermaid chart + section += this.#generateMermaidChart(groupResult, revisions); + + // Add detailed results + section += this.#generateDetailedResults(groupResult); + + return section; + } + + /** + * Generate comparison table for a group. + * + * @param {object} groupResult - Group result data + * @param {Map} revisions - All revision results + * @returns {string} + */ + #generateComparisonTable(groupResult, revisions) { + // Get all revision keys in order + const revisionKeys = Array.from(revisions.keys()); + + // Build table header + let table = "| Benchmark |"; + for (const revKey of revisionKeys) { + const revResult = revisions.get(revKey); + table += ` ${revResult.name} (s) |`; + } + table += "\n"; + + // Build separator + table += "|-----------|"; + for (let i = 0; i < revisionKeys.length; i++) { + table += "--------------|"; + } + table += "\n"; + + // Build rows + for (const benchmark of groupResult.benchmarks) { + table += `| ${benchmark.displayName} |`; + + for (const revKey of revisionKeys) { + const revData = benchmark.revisions.get(revKey); + if (!revData) { + table += " - |"; + } else if (!revData.success || !revData.result) { + table += " ❌ Failed |"; + } else { + const mean = revData.result.mean.toFixed(3); + const stddev = revData.result.stddev !== null ? 
revData.result.stddev.toFixed(3) : "N/A"; + table += ` ${mean} ± ${stddev} |`; + } + } + table += "\n"; + } + + table += "\n"; + return table; + } + + /** + * Generate Mermaid chart for a group. + * + * @param {object} groupResult - Group result data + * @param {Map} revisions - All revision results + * @returns {string} + */ + #generateMermaidChart(groupResult, revisions) { + // Collect chart data + const chartData = { + names: [], + values: [] + }; + const revisionKeys = Array.from(revisions.keys()); + + for (const benchmark of groupResult.benchmarks) { + for (const revKey of revisionKeys) { + const revData = benchmark.revisions.get(revKey); + if (revData && revData.success && revData.result) { + chartData.names.push(`${benchmark.displayName} (${revData.revisionName})`); + chartData.values.push(revData.result.mean); + } + } + } + + // Don't generate chart if no valid data + if (chartData.names.length === 0) { + return ""; + } + + // Build x-axis labels (revision name for each benchmark) + const xAxisLabels = chartData.names.map((name) => `"${name}"`).join(", "); + + // Calculate y-axis max value + const yAxisMaxValue = chartData.values.length > 0 ? Math.max(...chartData.values) * 1.1 : 1; + + // Build bar values + const barValues = chartData.values.map((v) => v !== null ? v.toFixed(3) : "0").join(", "); + + let chart = "\n### Performance Comparison Chart\n\n"; + chart += "```mermaid\n"; + chart += "---\n"; + chart += `config:\n`; + chart += ` xyChart:\n`; + chart += ` chartOrientation: "horizontal"\n`; + chart += `---\n`; + chart += "xychart-beta\n"; + chart += ` title "Benchmark Execution Time (seconds)"\n`; + chart += ` x-axis [${xAxisLabels}]\n`; + chart += ` y-axis "Time (seconds)" 0 --> ${yAxisMaxValue.toFixed(1)}\n`; + chart += ` bar [${barValues}]\n`; + chart += "```\n\n"; + + return chart; + } + + /** + * Generate detailed results. 
+ * + * @param {object} groupResult - Group result data + * @returns {string} + */ + #generateDetailedResults(groupResult) { + let section = "### Detailed Results\n\n"; + + // Show results for each benchmark + for (const benchmark of groupResult.benchmarks) { + section += `#### ${benchmark.displayName}\n\n`; + + for (const [revKey, revData] of benchmark.revisions.entries()) { + section += `**${revData.revisionName}** (\`${revKey}\` - \`${revData.commitHash.substring(0, 8)}\`):\n`; + + if (!revData.success || !revData.result) { + section += "- ❌ Failed\n\n"; + continue; + } + + const result = revData.result; + const stddevStr = result.stddev !== null ? `${result.stddev.toFixed(3)}s` : "N/A"; + section += `- Mean: ${result.mean.toFixed(3)}s ± ${stddevStr}\n`; + section += `- Min: ${result.min.toFixed(3)}s\n`; + section += `- Max: ${result.max.toFixed(3)}s\n`; + section += `- Median: ${result.median.toFixed(3)}s\n\n`; + } + } + + return section; + } +} diff --git a/internal/benchmark/lib/services/ExecutionPlanner.js b/internal/benchmark/lib/services/ExecutionPlanner.js new file mode 100644 index 00000000000..d048b45d767 --- /dev/null +++ b/internal/benchmark/lib/services/ExecutionPlanner.js @@ -0,0 +1,108 @@ +/** + * Plans the execution of benchmarks across revisions, creating a deduplicated + * execution matrix. + */ +export default class ExecutionPlanner { + /** + * Plan the execution of all benchmarks across all revisions. + * + * Creates a map where each revision has an array of benchmarks to execute, + * with each benchmark including its group membership information. 
+ * + * @param {Configuration} config - The benchmark configuration + * @param {Map} resolvedRevisions + * Map of revision keys to resolved commit information + * @returns {Map} Execution plan per revision + * + * RevisionPlan = { + * revisionKey: string, + * name: string, + * commitHash: string, + * benchmarks: BenchmarkExecution[] + * } + * + * BenchmarkExecution = { + * index: number, + * command: string, + * prepare: string|null, + * groupMemberships: Array<{groupKey: string, displayName: string}> + * } + */ + plan(config, resolvedRevisions) { + const executionPlan = new Map(); + + // Initialize plan for each revision + for (const [revisionKey, revisionInfo] of resolvedRevisions.entries()) { + executionPlan.set(revisionKey, { + revisionKey, + name: revisionInfo.name, + commitHash: revisionInfo.commitHash, + benchmarks: [] + }); + } + + // Add benchmarks to each applicable revision + for (const benchmark of config.benchmarks) { + for (const [revisionKey, revisionPlan] of executionPlan.entries()) { + // Check if this benchmark should run on this revision + if (benchmark.shouldRunOnRevision(revisionKey)) { + // Build group membership information + const groupMemberships = []; + for (const groupKey of benchmark.getGroupKeys()) { + groupMemberships.push({ + groupKey, + displayName: benchmark.getGroupDisplayName(groupKey) + }); + } + + revisionPlan.benchmarks.push({ + index: benchmark.index, + command: benchmark.command, + prepare: benchmark.prepare, + groupMemberships + }); + } + } + } + + // Verify that each revision has at least one benchmark + for (const [revisionKey, revisionPlan] of executionPlan.entries()) { + if (revisionPlan.benchmarks.length === 0) { + console.warn( + `Warning: Revision '${revisionKey}' (${revisionPlan.name}) ` + + `has no benchmarks assigned to it.` + ); + } + } + + return executionPlan; + } + + /** + * Get a summary of the execution plan for logging/debugging. 
+ * + * @param {Map} executionPlan - The execution plan + * @returns {string} Human-readable summary + */ + getSummary(executionPlan) { + let summary = "Execution Plan:\n"; + + for (const [revisionKey, plan] of executionPlan.entries()) { + summary += `\n ${plan.name} (${revisionKey}): ${plan.commitHash.substring(0, 8)}\n`; + summary += ` ${plan.benchmarks.length} benchmark(s):\n`; + + for (const benchmark of plan.benchmarks) { + const groupNames = benchmark.groupMemberships + .map((gm) => `${gm.groupKey}: "${gm.displayName}"`) + .join(", "); + summary += ` [${benchmark.index}] ui5 ${benchmark.command}`; + if (benchmark.prepare) { + summary += ` (prepare: ${benchmark.prepare})`; + } + summary += `\n Groups: ${groupNames}\n`; + } + } + + return summary; + } +} diff --git a/internal/benchmark/lib/services/HyperfineRunner.js b/internal/benchmark/lib/services/HyperfineRunner.js new file mode 100644 index 00000000000..cd18c99e2b6 --- /dev/null +++ b/internal/benchmark/lib/services/HyperfineRunner.js @@ -0,0 +1,147 @@ +import path from "node:path"; + +/** + * Executes hyperfine benchmarks and manages result files. + */ +export default class HyperfineRunner { + /** + * @param {object} options - Configuration options + * @param {object} options.git - Git utility module + * @param {object} options.npm - npm utility module + * @param {Function} options.spawnProcess - Process spawning function + * @param {string} options.ui5CliPath - Absolute path to the UI5 CLI executable + */ + constructor({git, npm, spawnProcess, ui5CliPath}) { + this.#git = git; + this.#npm = npm; + this.#spawnProcess = spawnProcess; + this.#ui5CliPath = ui5CliPath; + } + + #git; + #npm; + #spawnProcess; + #ui5CliPath; + + /** + * Execute benchmarks for a specific revision. 
+ * + * @param {object} params - Execution parameters + * @param {object} params.revisionPlan - The revision execution plan + * @param {object} params.hyperfineOptions - Hyperfine configuration + * @param {string} params.repositoryPath - Path to the UI5 CLI repository + * @param {string} params.workingDirectory - Working directory for benchmark execution + * @returns {Promise<{success: boolean, resultFilePath: string|null, error: Error|null}>} + */ + async run({revisionPlan, hyperfineOptions, repositoryPath, workingDirectory}) { + const {revisionKey, name, commitHash, benchmarks} = revisionPlan; + const {warmup, runs} = hyperfineOptions; + + console.log(`\n=== Running benchmarks for ${name} (${revisionKey}): ${commitHash} ===\n`); + + try { + if (benchmarks.length === 0) { + console.log(`No benchmarks to run for this revision.`); + return { + success: true, + resultFilePath: null, + error: null + }; + } + + // Checkout the revision + console.log(`Checking out ${commitHash}...`); + await this.#git.checkout(commitHash, repositoryPath); + + // Install dependencies + console.log(`Running npm ci...`); + await this.#npm.ci(repositoryPath); + + // Build hyperfine arguments + const args = [ + "--warmup", String(warmup), + "--runs", String(runs) + ]; + + // Add each benchmark as a separate command to hyperfine + for (const benchmark of benchmarks) { + const commandName = this.#buildCommandName(name, revisionKey, benchmark); + const fullCommand = `node ${this.#ui5CliPath} ${benchmark.command}`; + + // Add prepare command (empty string if none) + args.push("--prepare", benchmark.prepare || ""); + + // Add the benchmark command + args.push("--command-name", commandName, fullCommand); + } + + // Define result file path + const resultFileName = `benchmark-results-${revisionKey}-${commitHash}.json`; + const resultFilePath = path.resolve(workingDirectory, resultFileName); + args.push("--export-json", resultFilePath); + + console.log(`Executing hyperfine with ${benchmarks.length} 
benchmark(s)...`); + console.log(`Command: hyperfine ${args.join(" ")}\n`); + + // Execute hyperfine + await this.#spawnProcess("hyperfine", args, { + cwd: workingDirectory, + stdio: "inherit", + captureOutput: false, + env: { + // Inherit all other environment variables + ...process.env, + // Disable invocation of local UI5 CLI installations + UI5_CLI_NO_LOCAL: "X", + }, + errorMessage: "hyperfine exited with non-zero code" + }); + + console.log(`\n✅ Benchmarks completed successfully for ${name}\n`); + + return { + success: true, + resultFilePath, + error: null + }; + } catch (error) { + console.error(`\n❌ Benchmarks failed for ${name}: ${error.message}\n`); + + // Check for specific hyperfine installation error + if (error.message.includes("Failed to execute hyperfine")) { + const installError = new Error( + "hyperfine is required but not installed. " + + "Please install it (e.g., 'brew install hyperfine' on macOS)." + ); + return { + success: false, + resultFilePath: null, + error: installError + }; + } + + return { + success: false, + resultFilePath: null, + error + }; + } + } + + /** + * Build a command name for hyperfine output. + * + * @param {string} revisionName - Display name of the revision + * @param {string} revisionKey - Revision key + * @param {object} benchmark - Benchmark execution object + * @returns {string} + */ + #buildCommandName(revisionName, revisionKey, benchmark) { + // Use the first group's display name as the primary identifier + const primaryDisplayName = benchmark.groupMemberships.length > 0 ? 
+ benchmark.groupMemberships[0].displayName : + `ui5 ${benchmark.command}`; + + return `${revisionName} (${revisionKey}): ${primaryDisplayName}`; + } +} diff --git a/internal/benchmark/lib/services/ResultAggregator.js b/internal/benchmark/lib/services/ResultAggregator.js new file mode 100644 index 00000000000..61b7bd2c7d3 --- /dev/null +++ b/internal/benchmark/lib/services/ResultAggregator.js @@ -0,0 +1,209 @@ +/** + * Aggregates benchmark results from hyperfine JSON files and organizes them by groups. + */ +export default class ResultAggregator { + /** + * @param {object} fs - File system module (for dependency injection) + */ + constructor(fs) { + this.#fs = fs; + } + + #fs; + + /** + * Aggregate results from all revisions and organize by groups. + * + * @param {object} params - Aggregation parameters + * @param {Configuration} params.config - The benchmark configuration + * @param {Map} params.executionPlan - The execution plan + * @param {Map} params.executionResults - Results from HyperfineRunner + * @returns {AggregatedResults} + * + * AggregatedResults = { + * revisions: Map, + * groups: Map, + * failures: Array + * } + * + * RevisionResult = { + * revisionKey: string, + * name: string, + * commitHash: string, + * success: boolean, + * error: Error|null, + * benchmarks: Array + * } + * + * BenchmarkResult = { + * index: number, + * command: string, + * displayName: string, + * groupKey: string, + * hyperfineResult: object|null + * } + * + * GroupResult = { + * groupKey: string, + * groupName: string, + * benchmarks: Array + * } + * + * GroupBenchmarkResult = { + * displayName: string, + * revisions: Map + * } + */ + aggregate({config, executionPlan, executionResults}) { + const revisionResults = new Map(); + const failures = []; + + // Process each revision's results + for (const [revisionKey, executionResult] of executionResults.entries()) { + const revisionPlan = executionPlan.get(revisionKey); + + if (!executionResult.success) { + // Record failure + 
failures.push({ + revisionKey, + revisionName: revisionPlan.name, + commitHash: revisionPlan.commitHash, + error: executionResult.error + }); + + revisionResults.set(revisionKey, { + revisionKey, + name: revisionPlan.name, + commitHash: revisionPlan.commitHash, + success: false, + error: executionResult.error, + benchmarks: [] + }); + continue; + } + + // Read and parse the result file + let hyperfineData = null; + if (executionResult.resultFilePath) { + try { + const fileContents = this.#fs.readFileSync(executionResult.resultFilePath, "utf8"); + hyperfineData = JSON.parse(fileContents); + } catch (error) { + failures.push({ + revisionKey, + revisionName: revisionPlan.name, + commitHash: revisionPlan.commitHash, + error: new Error(`Failed to read result file: ${error.message}`) + }); + + revisionResults.set(revisionKey, { + revisionKey, + name: revisionPlan.name, + commitHash: revisionPlan.commitHash, + success: false, + error: new Error(`Failed to read result file: ${error.message}`), + benchmarks: [] + }); + continue; + } + } + + // Map hyperfine results to benchmarks (index-based mapping) + const benchmarkResults = []; + for (let i = 0; i < revisionPlan.benchmarks.length; i++) { + const benchmark = revisionPlan.benchmarks[i]; + const hyperfineResult = hyperfineData && hyperfineData.results ? 
+ hyperfineData.results[i] : + null; + + // Create a result entry for each group this benchmark belongs to + for (const groupMembership of benchmark.groupMemberships) { + benchmarkResults.push({ + index: benchmark.index, + command: benchmark.command, + displayName: groupMembership.displayName, + groupKey: groupMembership.groupKey, + hyperfineResult + }); + } + } + + revisionResults.set(revisionKey, { + revisionKey, + name: revisionPlan.name, + commitHash: revisionPlan.commitHash, + success: true, + error: null, + benchmarks: benchmarkResults + }); + } + + // Organize results by groups + const groupResults = this.#organizeByGroups(config, revisionResults); + + return { + revisions: revisionResults, + groups: groupResults, + failures + }; + } + + /** + * Organize benchmark results by groups for comparison. + * + * @param {Configuration} config - The benchmark configuration + * @param {Map} revisionResults - Results per revision + * @returns {Map} + */ + #organizeByGroups(config, revisionResults) { + const groupResults = new Map(); + + // Initialize group results + for (const groupKey of config.getGroupKeys()) { + const group = config.getGroup(groupKey); + groupResults.set(groupKey, { + groupKey, + groupName: group.name, + benchmarks: new Map() // Map + }); + } + + // Populate group results with benchmark data + for (const [revisionKey, revisionResult] of revisionResults.entries()) { + for (const benchmarkResult of revisionResult.benchmarks) { + const groupResult = groupResults.get(benchmarkResult.groupKey); + if (!groupResult) { + continue; // Skip if group not found (shouldn't happen) + } + + // Get or create benchmark entry in this group + if (!groupResult.benchmarks.has(benchmarkResult.displayName)) { + groupResult.benchmarks.set(benchmarkResult.displayName, { + displayName: benchmarkResult.displayName, + revisions: new Map() + }); + } + + const groupBenchmark = groupResult.benchmarks.get(benchmarkResult.displayName); + groupBenchmark.revisions.set(revisionKey, { 
+ revisionName: revisionResult.name, + commitHash: revisionResult.commitHash, + success: revisionResult.success, + result: benchmarkResult.hyperfineResult + }); + } + } + + // Convert benchmark maps to arrays for easier iteration + for (const groupResult of groupResults.values()) { + groupResult.benchmarks = Array.from(groupResult.benchmarks.values()); + } + + return groupResults; + } +} diff --git a/internal/benchmark/lib/services/RevisionResolver.js b/internal/benchmark/lib/services/RevisionResolver.js new file mode 100644 index 00000000000..b3f3e0a2ce4 --- /dev/null +++ b/internal/benchmark/lib/services/RevisionResolver.js @@ -0,0 +1,62 @@ +/** + * Resolves revision configurations to concrete git commit hashes. + */ +export default class RevisionResolver { + /** + * @param {object} git - Git utility module (for dependency injection) + * @param {Function} git.getMergeBaseRevision - Get merge base between two branches + * @param {Function} git.getBranchRevision - Get commit hash for a branch + */ + constructor(git) { + this.#git = git; + } + + #git; + + /** + * Resolve all revisions from the configuration to commit hashes. + * + * @param {Configuration} config - The benchmark configuration + * @param {string} repositoryPath - Absolute path to the git repository + * @returns {Promise>} + * Map of revision keys to resolved commit information + */ + async resolveAll(config, repositoryPath) { + const resolvedRevisions = new Map(); + + for (const revisionKey of config.getRevisionKeys()) { + const revision = config.getRevision(revisionKey); + const commitHash = await this.#resolveRevision(revision, repositoryPath); + + resolvedRevisions.set(revisionKey, { + name: revision.name, + commitHash + }); + } + + return resolvedRevisions; + } + + /** + * Resolve a single revision to a commit hash. 
+ * + * @param {Revision} revision - The revision to resolve + * @param {string} repositoryPath - Absolute path to the git repository + * @returns {Promise} The resolved commit hash + */ + async #resolveRevision(revision, repositoryPath) { + if (revision.isDirect()) { + // Direct git reference (branch, tag, or commit) + return await this.#git.getBranchRevision(revision.gitReference, repositoryPath); + } else if (revision.isMergeBase()) { + // Merge base calculation + return await this.#git.getMergeBaseRevision( + revision.targetBranch, + revision.mergeBaseFrom, + repositoryPath + ); + } else { + throw new Error(`Unknown revision type for '${revision.key}'`); + } + } +} diff --git a/internal/benchmark/lib/utils/git.js b/internal/benchmark/lib/utils/git.js new file mode 100644 index 00000000000..b9f3e34ef7e --- /dev/null +++ b/internal/benchmark/lib/utils/git.js @@ -0,0 +1,46 @@ +import {spawnProcess} from "./process.js"; + +// Execute a git command and return stdout +async function runGitCommand(args, cwd, commandName = args[0], spawnFn = spawnProcess) { + return spawnFn("git", args, { + cwd, + errorMessage: `git ${commandName} failed` + }); +} + +// Check if the git repository is dirty (has uncommitted changes) +async function checkGitStatus(cwd, spawnFn = spawnProcess) { + return runGitCommand(["status", "--porcelain"], cwd, "status", spawnFn); +} + +// Get the merge base revision between target and comparison branches +async function getMergeBaseRevision(targetBranch, comparisonBranch, cwd, spawnFn = spawnProcess) { + return runGitCommand(["merge-base", targetBranch, comparisonBranch], cwd, "merge-base", spawnFn); +} + +// Get the revision hash for a branch +async function getBranchRevision(branch, cwd, spawnFn = spawnProcess) { + return runGitCommand(["rev-parse", branch], cwd, "rev-parse", spawnFn); +} + +// Get the git revision of a directory (if it's a git repository) +async function getProjectRevision(cwd, spawnFn = spawnProcess) { + try { + return await 
runGitCommand(["rev-parse", "HEAD"], cwd, "rev-parse", spawnFn); + } catch { + return null; // Not a git repository or error + } +} + +// Checkout a specific git revision / branch +async function checkout(revision, cwd, spawnFn = spawnProcess) { + await runGitCommand(["checkout", revision], cwd, "checkout", spawnFn); +} + +export default { + checkGitStatus, + getMergeBaseRevision, + getBranchRevision, + getProjectRevision, + checkout +}; diff --git a/internal/benchmark/lib/utils/npm.js b/internal/benchmark/lib/utils/npm.js new file mode 100644 index 00000000000..8ee8550d70f --- /dev/null +++ b/internal/benchmark/lib/utils/npm.js @@ -0,0 +1,18 @@ +import {spawnProcess} from "./process.js"; + +// Execute an npm command and return stdout +async function runNpmCommand(args, cwd, commandName = args[0], spawnFn = spawnProcess) { + return spawnFn("npm", args, { + cwd, + errorMessage: `npm ${commandName} failed` + }); +} + +// Run "npm ci" to install dependencies in the given directory +async function ci(cwd, spawnFn = spawnProcess) { + await runNpmCommand(["ci"], cwd, "ci", spawnFn); +} + +export default { + ci +}; diff --git a/internal/benchmark/lib/utils/process.js b/internal/benchmark/lib/utils/process.js new file mode 100644 index 00000000000..a6862093e48 --- /dev/null +++ b/internal/benchmark/lib/utils/process.js @@ -0,0 +1,47 @@ +import {spawn} from "node:child_process"; + +// Generic function to spawn a process +export async function spawnProcess(command, args, options = {}) { + const { + cwd, + stdio = "pipe", + env = process.env, + captureOutput = true, + errorMessage = `${command} failed` + } = options; + + return new Promise((resolve, reject) => { + const childProcess = spawn(command, args, { + stdio, + cwd, + env + }); + + let stdout = ""; + let stderr = ""; + + if (captureOutput && childProcess.stdout) { + childProcess.stdout.on("data", (data) => { + stdout += data.toString(); + }); + } + + if (captureOutput && childProcess.stderr) { + childProcess.stderr.on("data", (data) => { 
+ stderr += data.toString(); + }); + } + + childProcess.on("close", (code) => { + if (code === 0) { + resolve(stdout.trim()); + } else { + reject(new Error(captureOutput ? `${errorMessage}: ${stderr}` : `${errorMessage} (exit code ${code})`)); + } + }); + + childProcess.on("error", (error) => { + reject(new Error(`Failed to execute ${command}: ${error.message}`)); + }); + }); +} diff --git a/internal/benchmark/package.json b/internal/benchmark/package.json new file mode 100644 index 00000000000..bdbded5e658 --- /dev/null +++ b/internal/benchmark/package.json @@ -0,0 +1,24 @@ +{ + "name": "@ui5-internal/benchmark", + "private": true, + "license": "Apache-2.0", + "type": "module", + "bin": { + "ui5-cli-benchmark": "./cli.js" + }, + "scripts": { + "test": "npm run lint && npm run coverage && npm run depcheck", + "unit": "node --test 'test/e2e/**/*.js'", + "unit-watch": "node --test --watch 'test/e2e/**/*.js'", + "coverage": "node --test --experimental-test-coverage 'test/e2e/**/*.js'", + "lint": "eslint .", + "depcheck": "depcheck" + }, + "dependencies": { + "js-yaml": "^4.1.1" + }, + "devDependencies": { + "depcheck": "^1.4.7", + "eslint": "^9.39.1" + } +} diff --git a/internal/benchmark/test/e2e/BenchmarkRunner.js b/internal/benchmark/test/e2e/BenchmarkRunner.js new file mode 100644 index 00000000000..ef4d3a99c67 --- /dev/null +++ b/internal/benchmark/test/e2e/BenchmarkRunner.js @@ -0,0 +1,306 @@ +import {test, describe} from "node:test"; +import path from "node:path"; +import BenchmarkRunner from "../../lib/BenchmarkRunner.js"; + +const testConfig = ` +revisions: + baseline: + name: "Baseline" + revision: + merge_base_from: "feat/example-feature" + target_branch: "main" + example_feature: + name: "Example Feature" + revision: "feat/example-feature" + +hyperfine: + warmup: 1 + runs: 10 + +groups: + build: + name: "ui5 build" + +benchmarks: + + - command: "build" + groups: + build: + name: "build" +`; + +function createMockFs(mockFileSystem = new Map()) { + return { 
+ readFileSync(filePath, encoding) { + if (filePath === "benchmark-config.yaml" && encoding === "utf8") { + return testConfig; + } else if (filePath === "benchmark-results-baseline-merge-base-revision-hash.json") { + return JSON.stringify({results: [ + { + command: "Baseline: ui5 build", + mean: 5.123, + stddev: 0.234, + min: 4.890, + max: 5.456, + median: 5.100 + }, + ]}); + } else if (filePath === "benchmark-results-comparison-branch-revision-hash.json") { + return JSON.stringify({results: [ + { + command: "Comparison: ui5 build", + mean: 4.567, + stddev: 0.123, + min: 4.444, + max: 4.690, + median: 4.560 + }, + ]}); + } else if (mockFileSystem.has(filePath)) { + return mockFileSystem.get(filePath); + } else { + const error = new Error(`File not found: ${filePath}`); + error.code = "ENOENT"; + throw error; + } + }, + writeFileSync(filePath, content, encoding) { + mockFileSystem.set(filePath, content); + } + }; +} + +function createMockGit({dirty = false} = {}) { + return { + async checkGitStatus() { + return dirty ? 
"M modified-file.js" : ""; + }, + async getMergeBaseRevision() { + return "merge-base-revision-hash"; + }, + async getBranchRevision() { + return "branch-revision-hash"; + }, + async getProjectRevision() { + return "project-revision-hash"; + }, + async checkout() { + return; + } + }; +} + +function createMockNpm() { + return { + async ci() { + return; + } + }; +} + +function createMockSpawnProcess(fn) { + return async function mockSpawnProcess(command, args, options) { + return fn(command, args, options); + }; +} + +describe("BenchmarkRunner (e2e)", () => { + const mocks = { + fs: createMockFs(), + git: createMockGit(), + npm: createMockNpm(), + spawnProcess: createMockSpawnProcess(), + timestamp: new Date() + }; + + test("should throw error when no config file provided", async ({assert}) => { + try { + const runner = new BenchmarkRunner({ + ...mocks + }); + await runner.run({ + timestamp: mocks.timestamp + }); + assert.fail("Command should throw an error"); + } catch (error) { + assert.equal(error.message, "Configuration file path must be a non-empty string"); + } + }); + + test("should throw error when config file not found", async ({assert}) => { + try { + const runner = new BenchmarkRunner({ + ...mocks + }); + await runner.run({ + configFilePath: "does-not-exist.yaml", + timestamp: mocks.timestamp + }); + assert.fail("Command should throw an error"); + } catch (error) { + assert.equal(error.message, "Configuration file not found: does-not-exist.yaml"); + } + }); + + test("should throw error when repository is dirty", async ({assert}) => { + try { + const runner = new BenchmarkRunner({ + ...mocks, + git: createMockGit({dirty: true}), + }); + await runner.run({ + configFilePath: "benchmark-config.yaml", + timestamp: mocks.timestamp + }); + assert.fail("Should have exited"); + } catch (error) { + assert.equal(error.message, + "Repository has uncommitted changes. " + + "Please commit or stash your changes before running benchmarks." 
+ ); + } + }); + + test("should successfully run benchmarks with default directory", async ({assert}) => { + const mockFileSystem = new Map(); + + const runner = new BenchmarkRunner({ + ...mocks, + fs: createMockFs(mockFileSystem), + spawnProcess: createMockSpawnProcess(() => { + mockFileSystem.set( + path.resolve(process.cwd(), "benchmark-results-baseline-merge-base-revision-hash.json"), + JSON.stringify({ + results: [ + { + command: "Baseline: ui5 build", + mean: 5.123, + stddev: 0.234, + min: 4.890, + max: 5.456, + median: 5.100 + }, + ] + }) + ); + mockFileSystem.set( + path.resolve(process.cwd(), "benchmark-results-example_feature-branch-revision-hash.json"), + JSON.stringify({ + results: [ + { + command: "Example Feature: ui5 build", + mean: 4.567, + stddev: 0.123, + min: 4.444, + max: 4.690, + median: 4.560 + }, + ] + }) + ); + return; + }) + }); + await runner.run({ + configFilePath: "benchmark-config.yaml", + timestamp: mocks.timestamp + }); + + // Verify that summary file was written + const summaryFilePath = path.resolve(process.cwd(), `benchmark-summary-${mocks.timestamp.toISOString().replace(/[:.]/g, "-")}.md`); + assert.ok(mockFileSystem.has(summaryFilePath), "Summary file should be written"); + + // Verify that results file was written + const resultsFilePath = path.resolve(process.cwd(), `benchmark-results-${mocks.timestamp.toISOString().replace(/[:.]/g, "-")}.json`); + assert.ok(mockFileSystem.has(resultsFilePath), "Results file should be written"); + assert.deepStrictEqual(JSON.parse(mockFileSystem.get(resultsFilePath)), { + "timestamp": mocks.timestamp.toISOString(), + "projectRevision": "project-revision-hash", + "revisions": { + "baseline": { + "name": "Baseline", + "commitHash": "merge-base-revision-hash", + "success": true, + "error": null, + "benchmarks": [ + { + "index": 0, + "command": "build", + "displayName": "build", + "groupKey": "build", + "result": { + "command": "Baseline: ui5 build", + "max": 5.456, + "mean": 5.123, + "median": 
5.1, + "min": 4.89, + "stddev": 0.234 + }, + } + ] + }, + "example_feature": { + "name": "Example Feature", + "commitHash": "branch-revision-hash", + "success": true, + "error": null, + "benchmarks": [ + { + "index": 0, + "command": "build", + "displayName": "build", + "groupKey": "build", + "result": { + "command": "Example Feature: ui5 build", + "mean": 4.567, + "stddev": 0.123, + "min": 4.444, + "max": 4.690, + "median": 4.560 + } + } + ] + } + }, + "groups": { + "build": { + "name": "ui5 build", + "benchmarks": [ + { + "displayName": "build", + "revisions": { + "baseline": { + "name": "Baseline", + "commitHash": "merge-base-revision-hash", + "success": true, + "result": { + "command": "Baseline: ui5 build", + "max": 5.456, + "mean": 5.123, + "median": 5.1, + "min": 4.89, + "stddev": 0.234 + }, + }, + "example_feature": { + "name": "Example Feature", + "commitHash": "branch-revision-hash", + "success": true, + "result": { + "command": "Example Feature: ui5 build", + "mean": 4.567, + "stddev": 0.123, + "min": 4.444, + "max": 4.690, + "median": 4.560 + } + } + } + } + ] + } + }, + "failures": [] + }); + }); +}); diff --git a/package-lock.json b/package-lock.json index dcbc851af43..20ba5703703 100644 --- a/package-lock.json +++ b/package-lock.json @@ -31,6 +31,20 @@ "npm": ">= 8" } }, + "internal/benchmark": { + "name": "@ui5-internal/benchmark", + "license": "Apache-2.0", + "dependencies": { + "js-yaml": "^4.1.1" + }, + "bin": { + "ui5-cli-benchmark": "cli.js" + }, + "devDependencies": { + "depcheck": "^1.4.7", + "eslint": "^9.39.1" + } + }, "internal/documentation": { "name": "@ui5/documentation", "version": "0.0.1", @@ -5036,6 +5050,10 @@ "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@ui5-internal/benchmark": { + "resolved": "internal/benchmark", + "link": true + }, "node_modules/@ui5/builder": { "resolved": "packages/builder", "link": true