diff --git a/.github/actions/conformance/action.yml b/.github/actions/conformance/action.yml
new file mode 100644
index 000000000..a183e189f
--- /dev/null
+++ b/.github/actions/conformance/action.yml
@@ -0,0 +1,110 @@
+name: "Run protobuf conformance"
+description: "Build and run the upstream Protocol Buffers conformance suite for protobuf.js."
+inputs:
+ upstream-version:
+ description: "Upstream protocolbuffers/protobuf tag or branch to test against."
+ default: "v33.0"
+ upstream-dir:
+ description: "Directory where upstream protobuf is checked out."
+ default: "tests/conformance/upstream"
+ runner-cache:
+ description: "Directory used to cache the built upstream conformance runner."
+ default: "tests/conformance/runner-cache"
+ output-dir:
+ description: "Directory where conformance logs and generated reports are written."
+ default: "tests/conformance/out"
+ maximum-edition:
+ description: "Maximum protobuf edition passed to the conformance runner."
+ default: "2024"
+outputs:
+ exit-code:
+ description: "Exit code returned by the upstream conformance runner."
+ value: ${{ steps.run-conformance.outputs.exit_code }}
+runs:
+ using: "composite"
+ steps:
+ - name: "Prepare conformance runner cache"
+ shell: bash
+ run: mkdir -p "${{ inputs.runner-cache }}"
+
+ - uses: actions/cache@v5
+ id: conformance-runner-cache
+ with:
+ path: ${{ inputs.runner-cache }}
+ key: protobuf-conformance-runner-${{ runner.os }}-${{ inputs.upstream-version }}
+
+ - name: "Check out upstream protobuf"
+ shell: bash
+ run: git clone --depth 1 --branch "${{ inputs.upstream-version }}" https://github.com/protocolbuffers/protobuf.git "${{ inputs.upstream-dir }}"
+
+ - name: "Install build dependencies"
+ if: steps.conformance-runner-cache.outputs.cache-hit != 'true'
+ shell: bash
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y cmake ninja-build
+
+ - name: "Build upstream conformance runner"
+ if: steps.conformance-runner-cache.outputs.cache-hit != 'true'
+ shell: bash
+ run: |
+ cmake -S "${{ inputs.upstream-dir }}" -B "${{ inputs.runner-cache }}/build" -G Ninja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -Dprotobuf_BUILD_CONFORMANCE=ON \
+ -Dprotobuf_BUILD_TESTS=OFF \
+ -Dprotobuf_BUILD_EXAMPLES=OFF \
+ -Dprotobuf_INSTALL=OFF \
+ -Dprotobuf_FORCE_FETCH_DEPENDENCIES=ON
+ cmake --build "${{ inputs.runner-cache }}/build" --target conformance_test_runner --parallel 2
+
+ - name: "Generate protobuf.js conformance target"
+ shell: bash
+ env:
+ PROTOBUF_UPSTREAM: ${{ inputs.upstream-dir }}
+ run: node tests/conformance/generate.js
+
+ - name: "Locate Node.js"
+ id: node
+ shell: bash
+ run: echo "path=$(command -v node)" >> "$GITHUB_OUTPUT"
+
+ - name: "List conformance tests"
+ shell: bash
+ run: |
+ mkdir -p "${{ inputs.output-dir }}"
+ : > "${{ inputs.output-dir }}/failure_list.txt"
+ "${{ inputs.runner-cache }}/build/conformance_test_runner" \
+ --maximum_edition "${{ inputs.maximum-edition }}" \
+ --enforce_recommended \
+ --verbose \
+ --failure_list "${{ inputs.output-dir }}/failure_list.txt" \
+ --output_dir "${{ inputs.output-dir }}" \
+ "${{ steps.node.outputs.path }}" tests/conformance/testee.js --list \
+ > "${{ inputs.output-dir }}/conformance-tests.log" 2>&1
+
+ - name: "Run conformance suite"
+ id: run-conformance
+ shell: bash
+ run: |
+ set +e
+ "${{ inputs.runner-cache }}/build/conformance_test_runner" \
+ --maximum_edition "${{ inputs.maximum-edition }}" \
+ --enforce_recommended \
+ --failure_list "${{ inputs.output-dir }}/failure_list.txt" \
+ --output_dir "${{ inputs.output-dir }}" \
+ "${{ steps.node.outputs.path }}" tests/conformance/testee.js \
+ > "${{ inputs.output-dir }}/conformance.log" 2>&1
+ status=$?
+ set -e
+ echo "exit_code=$status" >> "$GITHUB_OUTPUT"
+ tail -n 80 "${{ inputs.output-dir }}/conformance.log"
+ if [ -s "${{ inputs.output-dir }}/failing_tests.txt" ]; then
+ echo ""
+ echo "Failing conformance tests:"
+ cat "${{ inputs.output-dir }}/failing_tests.txt"
+ fi
+
+ - name: "Summarize conformance results"
+ if: always()
+ shell: bash
+ run: node tests/conformance/report.js "${{ inputs.output-dir }}/conformance.log" "${{ inputs.output-dir }}/conformance-tests.log" --json "${{ inputs.output-dir }}/conformance.json" | tee -a "$GITHUB_STEP_SUMMARY"
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index aeef68c64..d21c797b6 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -41,7 +41,7 @@ jobs:
run: npm run test:sources
- name: "Test types"
run: npm run test:types
- bench:
+ benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
@@ -51,4 +51,36 @@
       - name: "Install dependencies"
         run: npm install
       - name: "Run benchmark"
-        run: npm run bench
+        run: |
+          set -o pipefail
+          npm run bench 2>&1 | tee "$RUNNER_TEMP/bench.log"
+          {
+            echo '```'
+            sed -r 's/\x1b\[[0-9;]*m//g' "$RUNNER_TEMP/bench.log"
+            echo '```'
+          } >> "$GITHUB_STEP_SUMMARY"
+  conformance:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v6
+      - uses: actions/setup-node@v6
+        with:
+          node-version: "lts/*"
+      - name: "Install dependencies"
+        run: npm install --ignore-scripts
+      - name: "Install CLI dependencies"
+        run: npm --prefix cli install --ignore-scripts
+      - uses: ./.github/actions/conformance
+      # Gate the job on the summarized results; without this the runner's exit
+      # code is captured by the action but never enforced, so regressions pass CI.
+      - name: "Check conformance"
+        run: node tests/conformance/check.js tests/conformance/out/conformance.json
+      - uses: actions/upload-artifact@v7
+        if: always()
+        with:
+          name: conformance-results
+          path: |
+            tests/conformance/out/conformance.json
+            tests/conformance/out/conformance.log
+            tests/conformance/out/conformance-tests.log
+            tests/conformance/out/failing_tests.txt
+          if-no-files-found: ignore
diff --git a/README.md b/README.md
index 9e4e9a194..0b191ca39 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
**Protocol Buffers** are a language-neutral, platform-neutral, extensible way of serializing structured data for use in communications protocols, data storage, and more, originally designed at Google ([see](https://protobuf.dev/)).
-**protobuf.js** is a freestanding JavaScript implementation of Protocol Buffers with TypeScript support for Node.js and the browser. It works with `.proto` files out of the box and can generate optimized encoders and decoders at runtime or emit them statically.
+**protobuf.js** is a standalone JavaScript implementation of Protocol Buffers with TypeScript support for Node.js and the browser. It works with `.proto` files out of the box, is optimized for fast binary I/O, and supports runtime reflection as well as static code generation.
## Getting started
@@ -25,7 +25,7 @@ The [command line utility](./cli/) for generating reflection bundles, static cod
npm install --save-dev protobufjs-cli
```
-The CLI is a small but capable standalone protobuf.js toolchain. It does not require `protoc`.
+The CLI is a small but capable standalone protobuf.js toolchain. It does not require `protoc` or a language plugin.
### Choose a runtime
@@ -35,7 +35,7 @@ The CLI is a small but capable standalone protobuf.js toolchain. It does not req
| `protobufjs/light.js` | Reflection | You load JSON bundles or build schemas programmatically
| `protobufjs/minimal.js` | Static runtime | You only use generated static code
-Builds with reflection include just-in-time code generation. Use the CLI to emit the same optimized code ahead of time and run it on the minimal runtime. The full build includes the light build, and the light build includes the minimal runtime.
+Reflection builds generate specialized code at runtime. The CLI can emit the same optimized code ahead of time for the minimal runtime. The full build includes the light build, and the light build includes the minimal runtime.
### Browser builds
@@ -50,7 +50,7 @@ Pick the distribution matching your runtime variant and pin an exact version:
```
-Browser builds support CommonJS and AMD loaders and export globally as `window.protobuf`. Native ESM support is planned for a future release.
+Browser builds support CommonJS and AMD loaders and export globally as `window.protobuf`.
## Usage
@@ -99,7 +99,7 @@ const decoded = AwesomeMessage.decode(encoded);
Plain objects can be encoded directly when they already use protobuf.js runtime types: numbers for 32-bit numeric fields, booleans for `bool`, strings for `string`, `Uint8Array` or `Buffer` for `bytes`, arrays for repeated fields, and plain objects for maps. Map keys are the string representation of the respective value or an 8-character hash string for 64-bit/`Long` keys. Use `fromObject` when input may use broader JSON-style forms such as enum names, base64 strings for bytes, or decimal strings for 64-bit values.
-Install [`long`](https://github.com/dcodeIO/long.js) with protobuf.js when exact 64-bit integer support is required. Native `BigInt` support is planned for a future release.
+Install [`long`](https://github.com/dcodeIO/long.js) with protobuf.js when exact 64-bit integer support is required.
### Convert plain objects
@@ -112,6 +112,19 @@ const object = AwesomeMessage.toObject(message, {
});
```
+Common `ConversionOptions` are:
+
+| Option | Effect |
+|--------|--------|
+| `longs: String` | Converts 64-bit values to decimal strings |
+| `longs: Number` | Converts 64-bit values to JS numbers (may lose precision) |
+| `enums: String` | Converts enum values to names |
+| `bytes: String` | Converts bytes to base64 strings |
+| `defaults: true` | Includes default values for unset fields |
+| `arrays: true` | Includes empty arrays for repeated fields |
+| `objects: true` | Includes empty objects for map fields |
+| `oneofs: true` | Includes virtual oneof discriminator properties |
+
## Message API
Message types expose focused methods for validation, conversion, and binary I/O.
@@ -126,18 +139,7 @@ Message types expose focused methods for validation, conversion, and binary I/O.
Converts broader JavaScript input into a message instance.
* **toObject**(message: `Message`, options?: `ConversionOptions`): `object`
- Converts a message instance to a plain object for JSON or interoperability. Common options:
-
- | Option | Effect |
- |--------|--------|
- | `longs: String` | Converts 64-bit values to decimal strings |
- | `longs: Number` | Converts 64-bit values to JS numbers (may lose precision) |
- | `enums: String` | Converts enum values to names |
- | `bytes: String` | Converts bytes to base64 strings |
- | `defaults: true` | Includes default values for unset fields |
- | `arrays: true` | Includes empty arrays for repeated fields |
- | `objects: true` | Includes empty objects for map fields |
- | `oneofs: true` | Includes virtual oneof discriminator properties |
+ Converts a message instance to a plain object for JSON or interoperability.
* **encode**(message: `Message | object`, writer?: `Writer`): `Writer`
Encodes a message or equivalent plain object. Call `.finish()` on the returned writer to obtain a buffer.
@@ -298,11 +300,15 @@ For protobuf descriptor interoperability, see [ext/descriptor](./ext/descriptor)
In [CSP](https://w3c.github.io/webappsec-csp/)-restricted environments that disallow unsafe-eval, use generated static code instead of runtime code generation.
+## Conformance
+
+protobuf.js targets full binary wire-format conformance for **Proto2**, **Proto3** and **Editions**. CI runs the official Protocol Buffers conformance suite, with logs uploaded as artifacts.
+
## Performance
-Both protobuf.js reflection and static modes execute specialized encoder and decoder functions generated for each message type instead of a generic descriptor-walking interpreter.
+In both reflection and static modes, protobuf.js builds specialized encoders and decoders for each message type instead of interpreting descriptors at runtime.
-The repository includes a small benchmark for the bundled fixture in [`bench/`](./bench/). It compares protobuf.js reflection and static code against native `JSON.stringify`/`JSON.parse` and [google-protobuf](https://www.npmjs.com/package/google-protobuf). Results depend on hardware, Node.js version, and the message shape, so they should be treated as indicative rather than absolute.
+The repository includes a small benchmark for the bundled fixture in [`bench/`](./bench/). It compares protobuf.js reflection and static code against native `JSON.stringify`/`JSON.parse` and [google-protobuf](https://www.npmjs.com/package/google-protobuf) (`protoc-gen-js`). Results depend on hardware, Node.js version, and the message shape, so they should be treated as indicative rather than absolute.
One run on an AMD Ryzen 9 9950X3D with Node.js 24.13.0 and google-protobuf 4.0.2 produced:
diff --git a/tests/conformance/.gitignore b/tests/conformance/.gitignore
new file mode 100644
index 000000000..9b97786fb
--- /dev/null
+++ b/tests/conformance/.gitignore
@@ -0,0 +1,3 @@
+generated/
+out/
+upstream/
diff --git a/tests/conformance/check.js b/tests/conformance/check.js
new file mode 100644
index 000000000..9d9dacb93
--- /dev/null
+++ b/tests/conformance/check.js
@@ -0,0 +1,45 @@
+"use strict";
+
+var fs = require("fs");
+
+var args = process.argv.slice(2),
+ file = args[0],
+ binaryThreshold = 100,
+ report,
+ binary,
+ binaryPercent;
+
+for (var i = 1; i < args.length; ++i) {
+    if (args[i] === "--binary")
+        binaryThreshold = Number(args[++i]);
+}
+
+if (!file || !isFinite(binaryThreshold)) {
+    console.error("usage: node tests/conformance/check.js <summary.json> [--binary <percent>]");
+    process.exit(1);
+}
+
+if (!fs.existsSync(file)) {
+ console.error("missing conformance summary: " + file);
+ process.exit(1);
+}
+
+report = JSON.parse(fs.readFileSync(file, "utf8"));
+binary = report.totals && report.totals.byFormat && report.totals.byFormat.binary;
+if (!binary || !binary.total) {
+ console.error("missing Binary conformance results in " + file);
+ process.exit(1);
+}
+
+binaryPercent = binary.passPercent * 100;
+if (binaryPercent + 1e-9 < binaryThreshold) {
+ console.error("Binary conformance below " + binaryThreshold.toFixed(2) + "%: "
+ + binaryPercent.toFixed(2) + "% (" + binary.passed + "/" + binary.total + ")");
+ if (binary.failed)
+ console.error("Binary failures: " + binary.failed);
+ if (binary.skipped)
+ console.error("Binary skipped: " + binary.skipped);
+ process.exit(1);
+}
+
+console.log("Binary conformance: " + binaryPercent.toFixed(2) + "% (" + binary.passed + "/" + binary.total + ")");
diff --git a/tests/conformance/generate.js b/tests/conformance/generate.js
new file mode 100644
index 000000000..7cc4dcc3c
--- /dev/null
+++ b/tests/conformance/generate.js
@@ -0,0 +1,82 @@
+"use strict";
+
+var child_process = require("child_process"),
+ fs = require("fs"),
+ path = require("path");
+
+var rootDir = path.resolve(__dirname, "../.."),
+ upstreamDir = process.env.PROTOBUF_UPSTREAM || path.join(__dirname, "upstream"),
+ outputDir = path.join(__dirname, "generated"),
+ outputFile = path.join(outputDir, "messages.js"),
+ upstreamUnstableSchemaFile = "conformance/test_protos/test_messages_edition_unstable.proto",
+ unstableSchemaFile = path.join(outputDir, "test_messages_edition_unstable.proto"),
+ importRoots = [
+ "src",
+ "conformance",
+ "conformance/test_protos",
+ "editions/golden"
+ ],
+ schemaFiles = [
+ "conformance/conformance.proto",
+ "src/google/protobuf/test_messages_proto2.proto",
+ "src/google/protobuf/test_messages_proto3.proto",
+ "conformance/test_protos/test_messages_edition2023.proto",
+ "editions/golden/test_messages_proto2_editions.proto",
+ "editions/golden/test_messages_proto3_editions.proto"
+ ];
+
+if (!fs.existsSync(upstreamDir)) {
+ console.error("missing upstream protobuf checkout: " + upstreamDir);
+ process.exit(1);
+}
+
+upstreamDir = path.resolve(upstreamDir);
+
+fs.mkdirSync(outputDir, { recursive: true });
+
+// Upstream v34+ includes REQUIRED EditionUnstable conformance tests even with
+// --maximum_edition 2024. Optionally use a local stable-edition copy because
+// the parser intentionally only supports released editions.
+if (fs.existsSync(fromUpstream(upstreamUnstableSchemaFile))) {
+ fs.writeFileSync(
+ unstableSchemaFile,
+ fs.readFileSync(fromUpstream(upstreamUnstableSchemaFile), "utf8")
+ .replace("edition = \"UNSTABLE\";", "edition = \"2024\";")
+ );
+ schemaFiles.push(unstableSchemaFile);
+}
+
+runPbjs(importRoots.map(fromUpstream), schemaFiles.map(fromSchemaFile));
+
+function fromUpstream(relativePath) {
+ return path.join(upstreamDir, relativePath);
+}
+
+function fromSchemaFile(schemaFile) {
+ return path.isAbsolute(schemaFile) ? schemaFile : fromUpstream(schemaFile);
+}
+
+function runPbjs(importPaths, protoFiles) {
+ var args = [
+ path.join(rootDir, "cli/bin/pbjs"),
+ "-t", "static-module",
+ "-w", "commonjs",
+ "--dependency", "../../../minimal",
+ "-o", outputFile
+ ];
+
+ importPaths.forEach(function(importPath) {
+ args.push("-p", importPath);
+ });
+ Array.prototype.push.apply(args, protoFiles);
+
+ var result = child_process.spawnSync(process.execPath, args, {
+ cwd: rootDir,
+ stdio: "inherit"
+ });
+
+ if (result.error)
+ throw result.error;
+
+ process.exit(result.status || 0);
+}
diff --git a/tests/conformance/report.js b/tests/conformance/report.js
new file mode 100644
index 000000000..4b4f5aea9
--- /dev/null
+++ b/tests/conformance/report.js
@@ -0,0 +1,101 @@
+"use strict";
+
+var fs = require("fs"),
+ path = require("path"),
+ summary = require("./summary");
+
+var args = process.argv.slice(2),
+ jsonFile = null,
+ files = [],
+ report,
+ runnerSummary,
+ totals;
+
+args.forEach(function(arg, index) {
+ if (arg === "--json") {
+ jsonFile = args[index + 1];
+ } else if (index === 0 || args[index - 1] !== "--json") {
+ files.push(arg);
+ }
+});
+
+if (!files[0]) {
+    console.error("usage: node tests/conformance/report.js <conformance-log> [test-list-log] [--json <file>]");
+    process.exit(1);
+}
+
+report = summary.read(files[0], files[1]);
+if (jsonFile) {
+ fs.mkdirSync(path.dirname(jsonFile), { recursive: true });
+ fs.writeFileSync(jsonFile, JSON.stringify(report, null, 2) + "\n");
+}
+
+if (!fs.existsSync(files[0])) {
+ console.log("No conformance log found.");
+ process.exit(0);
+}
+
+runnerSummary = report.summary;
+if (!runnerSummary) {
+ console.log("No conformance summary found.");
+ process.exit(0);
+}
+
+totals = report.totals;
+printTable([
+ metric("Binary passing", totals.byFormat.binary),
+ metric("ProtoJSON passing", totals.byFormat.json),
+ metric("Required passing", totals.byRequirement.required),
+ metric("Recommended passing", totals.byRequirement.recommended),
+ metric("Total passing", totals.overall),
+ ["Skipped", String(runnerSummary.skipped)],
+ ["Expected failures", String(runnerSummary.expectedFailures)],
+ ["Unexpected failures", String(runnerSummary.unexpectedFailures)]
+].filter(Boolean));
+
+function metric(label, value) {
+ if (value && value.total)
+ return [label, formatResult(value)];
+ return null;
+}
+
+function printTable(rows) {
+ var metricWidth = maxWidth(["Metric"].concat(rows.map(function(row) {
+ return row[0];
+ }))),
+ countWidth = maxWidth(["Count"].concat(rows.map(function(row) {
+ return row[1];
+ })));
+
+ console.log("| " + padRight("Metric", metricWidth) + " | " + padLeft("Count", countWidth) + " |");
+ console.log("| " + repeat("-", metricWidth) + " | " + repeat("-", countWidth) + ": |");
+ rows.forEach(function(row) {
+ console.log("| " + padRight(row[0], metricWidth) + " | " + padLeft(row[1], countWidth) + " |");
+ });
+}
+
+function formatResult(value) {
+ return pct(value.passPercent) + " (" + value.passed + "/" + value.total + ")";
+}
+
+function pct(value) {
+ return (value * 100).toFixed(2) + "%";
+}
+
+function maxWidth(values) {
+ return values.reduce(function(max, value) {
+ return Math.max(max, value.length);
+ }, 0);
+}
+
+function padLeft(value, width) {
+ return repeat(" ", width - value.length) + value;
+}
+
+function padRight(value, width) {
+ return value + repeat(" ", width - value.length);
+}
+
+function repeat(value, count) {
+ return new Array(count + 1).join(value);
+}
diff --git a/tests/conformance/summary.js b/tests/conformance/summary.js
new file mode 100644
index 000000000..8cb661083
--- /dev/null
+++ b/tests/conformance/summary.js
@@ -0,0 +1,199 @@
+"use strict";
+
+var fs = require("fs");
+
+exports.read = function(logFile, testListLogFile) {
+ var tests = readTests(testListLogFile),
+ failures = readFailures(logFile),
+ skips = readSkips(logFile);
+
+ return {
+ summary: readRunnerSummary(logFile),
+ totals: summarize(tests, failures, skips)
+ };
+};
+
+exports.readText = readText;
+
+function readTests(file) {
+ var log,
+ tests = Object.create(null),
+ pattern = /SKIPPED, test=([^\r\n ]+)/g,
+ match,
+ name;
+
+ if (!file || !fs.existsSync(file))
+ return [];
+
+ log = readText(file);
+ if (log.indexOf("CONFORMANCE SUITE") >= 0)
+ log = log.substring(0, log.indexOf("CONFORMANCE SUITE"));
+ while ((match = pattern.exec(log)) !== null) {
+ name = match[1];
+ tests[name] = classifyTest(name);
+ }
+ return Object.keys(tests).sort().map(function(testName) {
+ var test = tests[testName];
+ test.name = testName;
+ return test;
+ });
+}
+
+function readFailures(file) {
+ var log,
+ failures = Object.create(null),
+ pattern = /ERROR, test=([^\r\n :]+)/g,
+ match;
+
+ if (!file || !fs.existsSync(file))
+ return failures;
+
+ log = readText(file);
+ while ((match = pattern.exec(log)) !== null)
+ failures[match[1]] = true;
+ return failures;
+}
+
+function readSkips(file) {
+ var log,
+ skips = Object.create(null),
+ pattern = /SKIPPED, test=([^\r\n ]+)/g,
+ match;
+
+ if (!file || !fs.existsSync(file))
+ return skips;
+
+ log = readText(file);
+ while ((match = pattern.exec(log)) !== null)
+ skips[match[1]] = true;
+ return skips;
+}
+
+function readRunnerSummary(file) {
+ var match;
+
+ if (!file || !fs.existsSync(file))
+ return null;
+
+ match = /CONFORMANCE SUITE \w+: (\d+) successes, (\d+) skipped, (\d+) expected failures, (\d+) unexpected failures\./.exec(readText(file));
+ return match ? {
+ successes: Number(match[1]),
+ skipped: Number(match[2]),
+ expectedFailures: Number(match[3]),
+ unexpectedFailures: Number(match[4])
+ } : null;
+}
+
+function summarize(tests, failures, skips) {
+ return {
+ overall: summarizeTests(tests, failures, skips),
+ byRequirement: summarizeGroups(tests, failures, skips, "requirement", requirementOrder()),
+ byFormat: summarizeGroups(tests, failures, skips, "format", formatOrder()),
+ bySyntax: summarizeGroups(tests, failures, skips, "syntax", syntaxOrder())
+ };
+}
+
+function summarizeGroups(tests, failures, skips, property, groups) {
+ var out = Object.create(null);
+ groups.forEach(function(group) {
+ var groupTests = tests.filter(function(test) {
+ return test[property] === group.id;
+ });
+ if (groupTests.length)
+ out[group.id] = Object.assign({ id: group.id, label: group.label }, summarizeTests(groupTests, failures, skips));
+ });
+ return out;
+}
+
+function summarizeTests(tests, failures, skips) {
+ var total = tests.length,
+ passed = 0,
+ failed = 0,
+ skipped = 0;
+
+ tests.forEach(function(test) {
+ if (failures[test.name]) {
+ ++failed;
+ } else if (skips[test.name]) {
+ ++skipped;
+ } else {
+ ++passed;
+ }
+ });
+
+ return {
+ total: total,
+ passed: passed,
+ failed: failed,
+ skipped: skipped,
+ passPercent: percent(passed, total)
+ };
+}
+
+function classifyTest(name) {
+ return {
+ requirement: name.indexOf("Required.") === 0 ? "required" : "recommended",
+ format: classifyFormat(name),
+ syntax: classifySyntax(name)
+ };
+}
+
+function classifyFormat(name) {
+ if (/(^|\.)TextFormat(Input|Output)(\.|$)/.test(name))
+ return "textFormat";
+ if (/(^|\.)(JSPB|Jspb)(Input|Output)(\.|$)/.test(name))
+ return "jspb";
+ if (/(^|\.)Json(Input|Output)(\.|$)/.test(name) || /\.Validator$/.test(name))
+ return "json";
+ if (/(^|\.)Protobuf(Input|Output)(\.|$)/.test(name))
+ return "binary";
+ return "other";
+}
+
+function classifySyntax(name) {
+ var parts = name.split(".");
+ if (parts[1] === "Proto2")
+ return "proto2";
+ if (parts[1] === "Proto3")
+ return "proto3";
+ if (parts[1] === "Editions" || parts[1] === "Editions_Proto2" || parts[1] === "Editions_Proto3")
+ return "editions";
+ return "other";
+}
+
+function formatOrder() {
+ return [
+ { id: "binary", label: "Binary" },
+ { id: "json", label: "ProtoJSON" },
+ { id: "textFormat", label: "Text Format" },
+ { id: "jspb", label: "JSPB" },
+ { id: "other", label: "Other" }
+ ];
+}
+
+function syntaxOrder() {
+ return [
+ { id: "proto2", label: "proto2" },
+ { id: "proto3", label: "proto3" },
+ { id: "editions", label: "Editions" },
+ { id: "other", label: "Other" }
+ ];
+}
+
+function requirementOrder() {
+ return [
+ { id: "required", label: "Required" },
+ { id: "recommended", label: "Recommended" }
+ ];
+}
+
+function percent(value, total) {
+ return total ? value / total : 0;
+}
+
+function readText(file) {
+ var buffer = fs.readFileSync(file);
+ return buffer[0] === 0xff && buffer[1] === 0xfe
+ ? buffer.toString("utf16le")
+ : buffer.toString("utf8");
+}
diff --git a/tests/conformance/testee.js b/tests/conformance/testee.js
new file mode 100644
index 000000000..1653181a5
--- /dev/null
+++ b/tests/conformance/testee.js
@@ -0,0 +1,181 @@
+"use strict";
+
+var fs = require("fs"),
+ generated = require("./generated/messages.js"),
+ conformance = generated.conformance,
+ list = process.argv.indexOf("--list") >= 0,
+ testTypes = Object.create(null);
+
+var TEST_TYPES = [
+ {
+ name: "protobuf_test_messages.proto2.TestAllTypesProto2",
+ type: generated.protobuf_test_messages.proto2.TestAllTypesProto2
+ },
+ {
+ name: "protobuf_test_messages.proto3.TestAllTypesProto3",
+ type: generated.protobuf_test_messages.proto3.TestAllTypesProto3
+ },
+ {
+ name: "protobuf_test_messages.editions.proto2.TestAllTypesProto2",
+ type: generated.protobuf_test_messages.editions.proto2.TestAllTypesProto2
+ },
+ {
+ name: "protobuf_test_messages.editions.proto3.TestAllTypesProto3",
+ type: generated.protobuf_test_messages.editions.proto3.TestAllTypesProto3
+ },
+ {
+ name: "protobuf_test_messages.editions.TestAllTypesEdition2023",
+ type: generated.protobuf_test_messages.editions.TestAllTypesEdition2023
+ }
+];
+
+// Register the local stable-edition copy of UNSTABLE if included by generate.js.
+if (generated.protobuf_test_messages.edition_unstable) {
+ TEST_TYPES.push({
+ name: "protobuf_test_messages.edition_unstable.TestAllTypesEditionUnstable",
+ type: generated.protobuf_test_messages.edition_unstable.TestAllTypesEditionUnstable
+ });
+}
+
+TEST_TYPES.forEach(function(testType) {
+ if (!testType.type)
+ throw Error("missing generated test type: " + testType.name);
+ testTypes[testType.name] = testType.type;
+});
+
+// Keep stdout synchronous because it carries the framed testee protocol.
+if (process.stdout._handle)
+ process.stdout._handle.setBlocking(true);
+
+var count = 0,
+ body,
+ message,
+ request,
+ requestBuffer,
+ response,
+ sizeBuffer,
+ type;
+
+try {
+ for (;;) {
+ // Read the next length-prefixed ConformanceRequest from stdin.
+ sizeBuffer = readStdin(4);
+ if (!sizeBuffer)
+ break;
+
+ requestBuffer = readStdin(sizeBuffer.readInt32LE(0));
+ if (!requestBuffer)
+ throw Error("unexpected EOF while reading request");
+
+ request = conformance.ConformanceRequest.decode(requestBuffer);
+ response = null;
+ message = null;
+
+ // Build the ConformanceResponse for this request.
+ if (request.messageType === "conformance.FailureSet") {
+ response = {
+ protobufPayload: conformance.FailureSet.encode(conformance.FailureSet.create()).finish()
+ };
+ } else if (list) {
+ response = { skipped: "list mode" };
+ } else {
+ type = testTypes[request.messageType];
+ if (!type) {
+ response = { runtimeError: "unknown message type: " + request.messageType };
+ } else {
+ // Parse the request payload into the requested generated type.
+ try {
+ switch (request.payload) {
+ case "protobufPayload":
+ message = type.decode(request.protobufPayload);
+ break;
+ case "jsonPayload":
+ message = type.fromObject(JSON.parse(request.jsonPayload));
+ break;
+ case "jspbPayload":
+ response = { parseError: "JSPB not supported" };
+ break;
+ case "textPayload":
+ response = { parseError: "TextFormat not supported" };
+ break;
+ default:
+ response = { parseError: "unsupported format" };
+ break;
+ }
+ } catch (err) {
+ response = { parseError: String(err) };
+ }
+
+ if (!response) {
+ // Serialize the parsed message into the requested output format.
+ try {
+ switch (request.requestedOutputFormat) {
+ case conformance.WireFormat.PROTOBUF:
+ response = { protobufPayload: type.encode(message).finish() };
+ break;
+ case conformance.WireFormat.JSON:
+ response = {
+ jsonPayload: JSON.stringify(type.toObject(message, {
+ json: true,
+ bytes: String,
+ longs: String,
+ enums: String
+ }))
+ };
+ break;
+ case conformance.WireFormat.JSPB:
+ response = { skipped: "JSPB not supported" };
+ break;
+ case conformance.WireFormat.TEXT_FORMAT:
+ response = { skipped: "text format not supported" };
+ break;
+ default:
+ response = { runtimeError: "unknown output format: " + request.requestedOutputFormat };
+ break;
+ }
+ } catch (err) {
+ response = { serializeError: String(err) };
+ }
+ }
+ }
+ }
+
+ // Write the length-prefixed ConformanceResponse to stdout.
+ body = conformance.ConformanceResponse.encode(
+ conformance.ConformanceResponse.create(response)
+ ).finish();
+ sizeBuffer = Buffer.alloc(4);
+ sizeBuffer.writeInt32LE(body.length, 0);
+ writeStdout(sizeBuffer);
+ writeStdout(Buffer.from(body));
+ ++count;
+ }
+} catch (err) {
+ process.stderr.write(
+ "conformance testee failed after " + count + " tests: "
+ + (err && err.stack || String(err)) + "\n"
+ );
+ process.exit(1);
+}
+
+function readStdin(size) {
+ var buffer = Buffer.alloc(size),
+ offset = 0,
+ read;
+ while (offset < size) {
+ read = fs.readSync(0, buffer, offset, size - offset, null);
+ if (read === 0) {
+ if (offset === 0)
+ return null;
+ throw Error("unexpected EOF");
+ }
+ offset += read;
+ }
+ return buffer;
+}
+
+function writeStdout(buffer) {
+ var offset = 0;
+ while (offset < buffer.length)
+ offset += fs.writeSync(1, buffer, offset, buffer.length - offset);
+}