|
1 | 1 | { |
2 | | - "version": "4.88.0", |
3 | | - "date": "2026-04-16T17:23:13Z", |
| 2 | + "version": "4.89.0", |
| 3 | + "date": "2026-04-23T16:20:40Z", |
4 | 4 | "config": [ |
5 | 5 | { |
6 | 6 | "name": "http", |
|
14809 | 14809 | "status": "stable", |
14810 | 14810 | "plugin": true, |
14811 | 14811 | "summary": "Enables Change Data Capture by consuming from OracleDB.", |
14812 | | - "description": "Streams changes from an Oracle database for Change Data Capture (CDC).\nAdditionally, if `stream_snapshot` is set to true, then the existing data in the database is also streamed too.\n\n== Metadata\n\nThis input adds the following metadata fields to each message:\n\n- database_schema: The database schema for the table where the message originates from.\n- table_name: Name of the table that the message originated from.\n- operation: Type of operation that generated the message: \"read\", \"delete\", \"insert\", or \"update\". \"read\" is from messages that are read in the initial snapshot phase.\n- scn: the System Change Number in Oracle.\n- source_ts_ms: The timestamp of when Oracle wrote the change record into the redo log, expressed as milliseconds since the Unix epoch. This reflects the database server's wall-clock time at the moment the DML executed, not the transaction commit time.\n- schema: The table schema, for use with schema-aware downstream processors such as `schema_registry_encode`. When new columns are detected in CDC events, the schema is automatically refreshed from the Oracle catalog. Dropped columns are reflected after a connector restart.\n\n== Permissions\n\nWhen using the default Oracle based cache, the Connect user requires permission to create tables and stored procedures, and the rpcn schema must already exist. Refer to `checkpoint_cache_table_name` for more information.\n\t\t", |
| 14812 | + "description": "Streams changes from an Oracle database for Change Data Capture (CDC).\nAdditionally, if `stream_snapshot` is set to true, then the existing data in the database is also streamed.\n\n== Metadata\n\nThis input adds the following metadata fields to each message:\n\n- database_schema: The database schema for the table where the message originates from.\n- table_name: Name of the table that the message originated from.\n- operation: Type of operation that generated the message: \"read\", \"delete\", \"insert\", or \"update\". \"read\" is from messages that are read in the initial snapshot phase.\n- scn: The System Change Number in Oracle.\n- transaction_id: The Oracle transaction ID in `USN.SLOT.SEQ` format, identifying the transaction that produced the change. Not present on snapshot (`read`) messages.\n- source_ts_ms: The timestamp of when Oracle wrote the change record into the redo log, expressed as milliseconds since the Unix epoch. This reflects the database server's wall-clock time at the moment the DML executed, not the transaction commit time.\n- commit_ts_ms: The timestamp of the transaction commit, expressed as milliseconds since the Unix epoch. Not present on snapshot (`read`) messages.\n- schema: The table schema, for use with schema-aware downstream processors such as `schema_registry_encode`. When new columns are detected in CDC events, the schema is automatically refreshed from the Oracle catalog. Dropped columns are reflected after a connector restart.\n\n== Permissions\n\nWhen using the default Oracle based cache, the Connect user requires permission to create tables and stored procedures, and the rpcn schema must already exist. Refer to `checkpoint_cache_table_name` for more information.\n\t\t", |
14813 | 14813 | "categories": [ |
14814 | 14814 | "Services" |
14815 | 14815 | ], |
|
14985 | 14985 | "name": "pdb_name", |
14986 | 14986 | "type": "string", |
14987 | 14987 | "kind": "scalar", |
14988 | | - "description": "The name of the pluggable database (PDB) to monitor. When connecting to a CDB root, LogMiner output is scoped to this PDB via SRC_CON_NAME filtering and catalog queries use ALTER SESSION SET CONTAINER to switch context. Requires `GRANT SET CONTAINER TO <user> CONTAINER=ALL`.", |
| 14988 | + "description": "The name of the pluggable database (PDB) to monitor. When connecting to a CDB root, LogMiner output is scoped to this PDB via SRC_CON_NAME filtering and catalog queries use ALTER SESSION SET CONTAINER to switch context. Requires GRANT SET CONTAINER TO <user> CONTAINER=ALL.", |
14989 | 14989 | "is_optional": true |
14990 | 14990 | }, |
14991 | 14991 | { |
|
43239 | 43239 | } |
43240 | 43240 | ] |
43241 | 43241 | }, |
43242 | | - "cloudSupported": false, |
| 43242 | + "cloudSupported": true, |
43243 | 43243 | "requiresCgo": false |
43244 | 43244 | }, |
43245 | 43245 | { |
|
54546 | 54546 | ], |
54547 | 54547 | "version": "4.11.0", |
54548 | 54548 | "linter": "\nlet options = {\n \"delta_length_byte_array\": true,\n \"plain\": true,\n}\n\nroot = if !$options.exists(this.string().lowercase()) {\n {\"type\": 2, \"what\": \"value %v is not a valid option for this field\".format(this.string())}\n}\n" |
| 54549 | + }, |
| 54550 | + { |
| 54551 | + "name": "default_timestamp_unit", |
| 54552 | + "type": "string", |
| 54553 | + "kind": "scalar", |
| 54554 | + "description": "The precision used when encoding TIMESTAMP logical types. The default `NANOSECOND` matches historical behaviour, but `TIMESTAMP(NANOS)` is not readable by Apache Spark (Databricks), AWS Athena or DuckDB; set this to `MICROSECOND` (or `MILLISECOND`) when writing Parquet files intended for consumption by those engines.", |
| 54555 | + "is_advanced": true, |
| 54556 | + "default": "NANOSECOND", |
| 54557 | + "options": [ |
| 54558 | + "NANOSECOND", |
| 54559 | + "MICROSECOND", |
| 54560 | + "MILLISECOND" |
| 54561 | + ], |
| 54562 | + "version": "4.89.0", |
| 54563 | + "linter": "\nlet options = {\n \"nanosecond\": true,\n \"microsecond\": true,\n \"millisecond\": true,\n}\n\nroot = if !$options.exists(this.string().lowercase()) {\n {\"type\": 2, \"what\": \"value %v is not a valid option for this field\".format(this.string())}\n}\n" |
54549 | 54564 | } |
54550 | 54565 | ], |
54551 | 54566 | "linter": "root = if this.schema.or([]).length() == 0 && this.schema_metadata.or(\"\") == \"\" { \"either a schema or schema_metadata must be specified\" }" |
|
56015 | 56030 | } |
56016 | 56031 | ] |
56017 | 56032 | }, |
56018 | | - "cloudSupported": false, |
| 56033 | + "cloudSupported": true, |
56019 | 56034 | "requiresCgo": false |
56020 | 56035 | }, |
56021 | 56036 | { |
|
59347 | 59362 | "status": "stable", |
59348 | 59363 | "plugin": true, |
59349 | 59364 | "summary": "Send metrics to an https://opentelemetry.io/docs/collector/[Open Telemetry collector^].", |
59350 | | - "description": "\nExports Redpanda Connect metrics to one or more OpenTelemetry Collector endpoints over HTTP or gRPC for aggregation and onward export.\n\nMetrics are encoded using the OpenTelemetry Metrics protocol and can be sent to any collector endpoint that supports OTLP.\n", |
59351 | 59365 | "categories": null, |
59352 | 59366 | "config": { |
59353 | 59367 | "name": "", |
|
59433 | 59447 | "name": "tags", |
59434 | 59448 | "type": "string", |
59435 | 59449 | "kind": "map", |
59436 | | - "description": "A map of tags to add to all exported metrics.", |
| 59450 | + "description": "A map of tags to add to all exported spans and metrics.", |
59437 | 59451 | "is_advanced": true, |
59438 | 59452 | "default": {} |
59439 | 59453 | } |
|
59900 | 59914 | "name": "tags", |
59901 | 59915 | "type": "string", |
59902 | 59916 | "kind": "map", |
59903 | | - "description": "A map of tags to add to all exported spans.", |
| 59917 | + "description": "A map of tags to add to all exported spans and metrics.", |
59904 | 59918 | "is_advanced": true, |
59905 | 59919 | "default": {} |
59906 | 59920 | }, |
|
0 commit comments