
Commit 3765fb2

auto-docs: Update RPCN connector docs (#419)
1 parent c12601c commit 3765fb2

12 files changed

Lines changed: 1155 additions & 210 deletions


antora.yml

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ asciidoc:
     project-github: redpanda-data/connect
     full-version: 24.1.4
     latest-release-commit: b5ade3f40
-    latest-connect-version: 4.88.0
+    latest-connect-version: 4.89.0
     page-home-intro: >-
       Redpanda Connect is a data streaming service for building scalable, high-performance data pipelines that drive
       real-time analytics and actionable business insights. Integrate data across systems with hundreds of prebuilt
Lines changed: 945 additions & 127 deletions
Large diffs are not rendered by default.

docs-data/connect-diff-4.87.0_to_4.88.0.json renamed to docs-data/connect-diff-4.88.0_to_4.89.0.json

Lines changed: 137 additions & 52 deletions
@@ -1,8 +1,8 @@
 {
   "comparison": {
-    "oldVersion": "4.87.0",
-    "newVersion": "4.88.0",
-    "timestamp": "2026-04-17T11:58:02.300Z"
+    "oldVersion": "4.88.0",
+    "newVersion": "4.89.0",
+    "timestamp": "2026-04-23T16:42:07.496Z"
   },
   "summary": {
     "newComponents": 0,
@@ -12,7 +12,7 @@
     "deprecatedComponents": 0,
     "deprecatedFields": 0,
     "changedDefaults": 0,
-    "platformTransitions": 0,
+    "platformTransitions": 2,
     "newBloblangMethods": 0,
     "removedBloblangMethods": 0,
     "newBloblangFunctions": 0,
@@ -25,16 +25,52 @@
   "removedComponents": [],
   "newFields": [
     {
-      "component": "inputs:oracledb_cdc",
-      "field": "pdb_name",
-      "description": "The name of the pluggable database (PDB) to monitor. When connecting to a CDB root, LogMiner output is scoped to this PDB via SRC_CON_NAME filtering and catalog queries use ALTER SESSION SET CONTAINER to switch context. Requires GRANT SET CONTAINER TO <user> CONTAINER=ALL."
+      "component": "processors:parquet_encode",
+      "field": "default_timestamp_unit",
+      "introducedIn": "4.89.0",
+      "description": "The precision used when encoding TIMESTAMP logical types. The default `NANOSECOND` matches historical behaviour, but `TIMESTAMP(NANOS)` is not readable by Apache Spark (Databricks), AWS Athena or DuckDB; set this to `MICROSECOND` (or `MILLISECOND`) when writing Parquet files intended for consumption by those engines."
     }
   ],
   "removedFields": [],
   "deprecatedComponents": [],
   "deprecatedFields": [],
   "changedDefaults": [],
-  "platformTransitions": [],
+  "platformTransitions": [
+    {
+      "name": "salesforce_sink",
+      "type": "outputs",
+      "transitions": [
+        "added_cloud_support"
+      ],
+      "oldPlatform": {
+        "requiresCgo": false,
+        "cloudSupported": false,
+        "cloudOnly": false
+      },
+      "newPlatform": {
+        "requiresCgo": false,
+        "cloudSupported": true,
+        "cloudOnly": false
+      }
+    },
+    {
+      "name": "salesforce",
+      "type": "processors",
+      "transitions": [
+        "added_cloud_support"
+      ],
+      "oldPlatform": {
+        "requiresCgo": false,
+        "cloudSupported": false,
+        "cloudOnly": false
+      },
+      "newPlatform": {
+        "requiresCgo": false,
+        "cloudSupported": true,
+        "cloudOnly": false
+      }
+    }
+  ],
   "newBloblangMethods": [],
   "removedBloblangMethods": [],
   "newBloblangFunctions": [],
@@ -44,16 +80,63 @@
   },
   "binaryAnalysis": {
     "versions": {
-      "oss": "4.88.0",
-      "cloud": "4.88.0",
-      "cgo": "4.88.0"
+      "oss": "4.89.0",
+      "cloud": "4.89.0",
+      "cgo": "4.89.0"
     },
     "current": {
-      "cloudSupported": 480,
-      "selfHostedOnly": 82,
-      "cgoOnly": 0
+      "cloudSupported": 481,
+      "selfHostedOnly": 76,
+      "cgoOnly": 4
+    },
+    "changes": {
+      "cloud": {
+        "added": [
+          {
+            "type": "outputs",
+            "name": "salesforce_sink",
+            "status": "stable"
+          },
+          {
+            "type": "processors",
+            "name": "salesforce",
+            "status": "stable"
+          }
+        ],
+        "removed": [
+          {
+            "type": "processors",
+            "name": "a2a_message",
+            "status": "stable"
+          }
+        ]
+      },
+      "cgo": {
+        "newCgoOnly": [
+          {
+            "type": "inputs",
+            "name": "tigerbeetle_cdc",
+            "status": "stable"
+          },
+          {
+            "type": "inputs",
+            "name": "zmq4",
+            "status": "stable"
+          },
+          {
+            "type": "outputs",
+            "name": "zmq4",
+            "status": "stable"
+          },
+          {
+            "type": "processors",
+            "name": "ffi",
+            "status": "stable"
+          }
+        ],
+        "removedCgoOnly": []
+      }
     },
-    "changes": {},
     "details": {
       "cloudSupported": [
         {
@@ -766,6 +849,11 @@
         "name": "retry",
         "status": "stable"
       },
+      {
+        "type": "outputs",
+        "name": "salesforce_sink",
+        "status": "stable"
+      },
       {
         "type": "outputs",
         "name": "schema_registry",
@@ -1121,6 +1209,11 @@
         "name": "retry",
         "status": "beta"
       },
+      {
+        "type": "processors",
+        "name": "salesforce",
+        "status": "stable"
+      },
       {
         "type": "processors",
         "name": "schema_registry_decode",
@@ -1211,11 +1304,6 @@
         "name": "xml",
         "status": "stable"
       },
-      {
-        "type": "processors",
-        "name": "a2a_message",
-        "status": "stable"
-      },
       {
         "type": "rate-limits",
         "name": "local",
@@ -2578,16 +2666,6 @@
         "name": "websocket",
         "status": "stable"
       },
-      {
-        "type": "inputs",
-        "name": "tigerbeetle_cdc",
-        "status": "stable"
-      },
-      {
-        "type": "inputs",
-        "name": "zmq4",
-        "status": "stable"
-      },
       {
         "type": "outputs",
         "name": "amqp_1",
@@ -2673,11 +2751,6 @@
         "name": "pusher",
         "status": "stable"
       },
-      {
-        "type": "outputs",
-        "name": "salesforce_sink",
-        "status": "stable"
-      },
       {
         "type": "outputs",
         "name": "socket",
@@ -2703,11 +2776,6 @@
         "name": "websocket",
         "status": "stable"
       },
-      {
-        "type": "outputs",
-        "name": "zmq4",
-        "status": "stable"
-      },
       {
         "type": "processors",
         "name": "awk",
@@ -2773,11 +2841,6 @@
         "name": "redpanda_data_transform",
         "status": "stable"
       },
-      {
-        "type": "processors",
-        "name": "salesforce",
-        "status": "stable"
-      },
       {
         "type": "processors",
         "name": "sentry_capture",
@@ -2798,11 +2861,6 @@
         "name": "wasm",
         "status": "stable"
       },
-      {
-        "type": "processors",
-        "name": "ffi",
-        "status": "stable"
-      },
       {
         "type": "metrics",
         "name": "aws_cloudwatch",
@@ -2869,8 +2927,35 @@
         "status": "stable"
       }
     ],
-    "cloudOnly": [],
-    "cgoOnly": []
+    "cloudOnly": [
+      {
+        "type": "processors",
+        "name": "a2a_message",
+        "status": "stable"
+      }
+    ],
+    "cgoOnly": [
+      {
+        "type": "inputs",
+        "name": "tigerbeetle_cdc",
+        "status": "stable"
+      },
+      {
+        "type": "inputs",
+        "name": "zmq4",
+        "status": "stable"
+      },
+      {
+        "type": "outputs",
+        "name": "zmq4",
+        "status": "stable"
+      },
+      {
+        "type": "processors",
+        "name": "ffi",
+        "status": "stable"
+      }
+    ]
   }
 }
 }

modules/components/attachments/connect-4.88.0.json renamed to modules/components/attachments/connect-4.89.0.json

Lines changed: 23 additions & 9 deletions
@@ -1,6 +1,6 @@
 {
-  "version": "4.88.0",
-  "date": "2026-04-16T17:23:13Z",
+  "version": "4.89.0",
+  "date": "2026-04-23T16:20:40Z",
   "config": [
     {
       "name": "http",
@@ -14809,7 +14809,7 @@
       "status": "stable",
       "plugin": true,
       "summary": "Enables Change Data Capture by consuming from OracleDB.",
-      "description": "Streams changes from an Oracle database for Change Data Capture (CDC).\nAdditionally, if `stream_snapshot` is set to true, then the existing data in the database is also streamed too.\n\n== Metadata\n\nThis input adds the following metadata fields to each message:\n\n- database_schema: The database schema for the table where the message originates from.\n- table_name: Name of the table that the message originated from.\n- operation: Type of operation that generated the message: \"read\", \"delete\", \"insert\", or \"update\". \"read\" is from messages that are read in the initial snapshot phase.\n- scn: the System Change Number in Oracle.\n- source_ts_ms: The timestamp of when Oracle wrote the change record into the redo log, expressed as milliseconds since the Unix epoch. This reflects the database server's wall-clock time at the moment the DML executed, not the transaction commit time.\n- schema: The table schema, for use with schema-aware downstream processors such as `schema_registry_encode`. When new columns are detected in CDC events, the schema is automatically refreshed from the Oracle catalog. Dropped columns are reflected after a connector restart.\n\n== Permissions\n\nWhen using the default Oracle based cache, the Connect user requires permission to create tables and stored procedures, and the rpcn schema must already exist. Refer to `checkpoint_cache_table_name` for more information.\n\t\t",
+      "description": "Streams changes from an Oracle database for Change Data Capture (CDC).\nAdditionally, if `stream_snapshot` is set to true, then the existing data in the database is also streamed too.\n\n== Metadata\n\nThis input adds the following metadata fields to each message:\n\n- database_schema: The database schema for the table where the message originates from.\n- table_name: Name of the table that the message originated from.\n- operation: Type of operation that generated the message: \"read\", \"delete\", \"insert\", or \"update\". \"read\" is from messages that are read in the initial snapshot phase.\n- scn: the System Change Number in Oracle.\n- transaction_id: The Oracle transaction ID in `USN.SLOT.SEQ` format, identifying the transaction that produced the change. Not present on snapshot (`read`) messages.\n- source_ts_ms: The timestamp of when Oracle wrote the change record into the redo log, expressed as milliseconds since the Unix epoch. This reflects the database server's wall-clock time at the moment the DML executed, not the transaction commit time.\n- commit_ts_ms: The timestamp of the transaction commit, expressed as milliseconds since the Unix epoch. Not present on snapshot (`read`) messages.\n- schema: The table schema, for use with schema-aware downstream processors such as `schema_registry_encode`. When new columns are detected in CDC events, the schema is automatically refreshed from the Oracle catalog. Dropped columns are reflected after a connector restart.\n\n== Permissions\n\nWhen using the default Oracle based cache, the Connect user requires permission to create tables and stored procedures, and the rpcn schema must already exist. Refer to `checkpoint_cache_table_name` for more information.\n\t\t",
       "categories": [
         "Services"
       ],
@@ -14985,7 +14985,7 @@
           "name": "pdb_name",
           "type": "string",
           "kind": "scalar",
-          "description": "The name of the pluggable database (PDB) to monitor. When connecting to a CDB root, LogMiner output is scoped to this PDB via SRC_CON_NAME filtering and catalog queries use ALTER SESSION SET CONTAINER to switch context. Requires `GRANT SET CONTAINER TO &lt;user&gt; CONTAINER=ALL`.",
+          "description": "The name of the pluggable database (PDB) to monitor. When connecting to a CDB root, LogMiner output is scoped to this PDB via SRC_CON_NAME filtering and catalog queries use ALTER SESSION SET CONTAINER to switch context. Requires GRANT SET CONTAINER TO <user> CONTAINER=ALL.",
           "is_optional": true
         },
         {
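The 4.89.0 `oracledb_cdc` input adds `transaction_id` and `commit_ts_ms` metadata and documents the optional `pdb_name` field for CDB/PDB setups. A minimal sketch of how these could be used, assuming a placeholder PDB name and omitting the connection and table settings that this diff does not show:

input:
  oracledb_cdc:
    # Connection and table settings omitted; only fields confirmed by this diff are shown.
    pdb_name: MYPDB          # requires GRANT SET CONTAINER TO <user> CONTAINER=ALL
    stream_snapshot: true

pipeline:
  processors:
    - mapping: |
        root = this
        # New metadata in 4.89.0; not present on snapshot ("read") messages.
        root.transaction_id = @transaction_id
        root.commit_ts_ms = @commit_ts_ms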
@@ -43239,7 +43239,7 @@
           }
         ]
       },
-      "cloudSupported": false,
+      "cloudSupported": true,
       "requiresCgo": false
     },
     {
@@ -54546,6 +54546,21 @@
           ],
           "version": "4.11.0",
           "linter": "\nlet options = {\n \"delta_length_byte_array\": true,\n \"plain\": true,\n}\n\nroot = if !$options.exists(this.string().lowercase()) {\n {\"type\": 2, \"what\": \"value %v is not a valid option for this field\".format(this.string())}\n}\n"
+        },
+        {
+          "name": "default_timestamp_unit",
+          "type": "string",
+          "kind": "scalar",
+          "description": "The precision used when encoding TIMESTAMP logical types. The default `NANOSECOND` matches historical behaviour, but `TIMESTAMP(NANOS)` is not readable by Apache Spark (Databricks), AWS Athena or DuckDB; set this to `MICROSECOND` (or `MILLISECOND`) when writing Parquet files intended for consumption by those engines.",
+          "is_advanced": true,
+          "default": "NANOSECOND",
+          "options": [
+            "NANOSECOND",
+            "MICROSECOND",
+            "MILLISECOND"
+          ],
+          "version": "4.89.0",
+          "linter": "\nlet options = {\n \"nanosecond\": true,\n \"microsecond\": true,\n \"millisecond\": true,\n}\n\nroot = if !$options.exists(this.string().lowercase()) {\n {\"type\": 2, \"what\": \"value %v is not a valid option for this field\".format(this.string())}\n}\n"
         }
       ],
       "linter": "root = if this.schema.or([]).length() == 0 && this.schema_metadata.or(\"\") == \"\" { \"either a schema or schema_metadata must be specified\" }"
@@ -56015,7 +56030,7 @@
           }
         ]
       },
-      "cloudSupported": false,
+      "cloudSupported": true,
       "requiresCgo": false
     },
     {
@@ -59347,7 +59362,6 @@
       "status": "stable",
       "plugin": true,
       "summary": "Send metrics to an https://opentelemetry.io/docs/collector/[Open Telemetry collector^].",
-      "description": "\nExports Redpanda Connect metrics to one or more OpenTelemetry Collector endpoints over HTTP or gRPC for aggregation and onward export.\n\nMetrics are encoded using the OpenTelemetry Metrics protocol and can be sent to any collector endpoint that supports OTLP.\n",
       "categories": null,
       "config": {
         "name": "",
@@ -59433,7 +59447,7 @@
           "name": "tags",
           "type": "string",
           "kind": "map",
-          "description": "A map of tags to add to all exported metrics.",
+          "description": "A map of tags to add to all exported spans and metrics.",
           "is_advanced": true,
           "default": {}
         }
@@ -59900,7 +59914,7 @@
           "name": "tags",
           "type": "string",
           "kind": "map",
-          "description": "A map of tags to add to all exported spans.",
+          "description": "A map of tags to add to all exported spans and metrics.",
           "is_advanced": true,
           "default": {}
         },
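Both `tags` descriptions above now read "A map of tags to add to all exported spans and metrics." A minimal sketch of setting them, assuming the metrics exporter shares the `open_telemetry_collector` name with the tracer; the endpoint configuration is omitted and the tag values are placeholders:

metrics:
  open_telemetry_collector:
    # Collector endpoint settings omitted; only the tags field from this diff is shown.
    tags:
      environment: production

tracer:
  open_telemetry_collector:
    tags:
      environment: production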
